From 81a4d7cda85976c77fabf181cbf6c0f099f0687c Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 8 Jun 2022 18:53:46 +0800
Subject: [PATCH 001/204] remote --> local

---
 det-yolov5-tmi/cuda102.dockerfile       |  40 +++++
 det-yolov5-tmi/cuda111-devel.dockerfile |  43 +++++
 det-yolov5-tmi/cuda111.dockerfile       |  42 +++++
 det-yolov5-tmi/infer-template.yaml      |  12 ++
 det-yolov5-tmi/mining-template.yaml     |  12 ++
 det-yolov5-tmi/mining/data_augment.py   | 198 ++++++++++++++++++++
 det-yolov5-tmi/mining/mining_cald.py    | 144 +++++++++++++++
 det-yolov5-tmi/mypy.ini                 |   8 +
 det-yolov5-tmi/requirements.txt         |   9 +-
 det-yolov5-tmi/start.py                 | 128 +++++++++++++
 det-yolov5-tmi/train.py                 |  29 ++-
 det-yolov5-tmi/training-template.yaml   |  14 ++
 det-yolov5-tmi/utils/datasets.py        |  47 ++---
 det-yolov5-tmi/utils/general.py         |   4 +
 det-yolov5-tmi/utils/ymir_yolov5.py     | 230 ++++++++++++++++++++++++
 15 files changed, 930 insertions(+), 30 deletions(-)
 create mode 100644 det-yolov5-tmi/cuda102.dockerfile
 create mode 100644 det-yolov5-tmi/cuda111-devel.dockerfile
 create mode 100644 det-yolov5-tmi/cuda111.dockerfile
 create mode 100644 det-yolov5-tmi/infer-template.yaml
 create mode 100644 det-yolov5-tmi/mining-template.yaml
 create mode 100644 det-yolov5-tmi/mining/data_augment.py
 create mode 100644 det-yolov5-tmi/mining/mining_cald.py
 create mode 100644 det-yolov5-tmi/mypy.ini
 create mode 100644 det-yolov5-tmi/start.py
 create mode 100644 det-yolov5-tmi/training-template.yaml
 create mode 100644 det-yolov5-tmi/utils/ymir_yolov5.py

diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile
new file mode 100644
index 0000000..3c359ee
--- /dev/null
+++ b/det-yolov5-tmi/cuda102.dockerfile
@@ -0,0 +1,40 @@
+ARG PYTORCH="1.8.1"
+ARG CUDA="10.2"
+ARG CUDNN="7"
+
+FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime
+
+ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
+ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
+ENV LANG=C.UTF-8
+
+# Install linux package
+RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \
+    libgl1-mesa-glx ffmpeg build-essential curl wget zip \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install python package
+RUN pip install -U pip && \
+    pip install cython xtcocotools onnx onnx-simplifier loguru \
+    tensorboard==2.5.0 numba progress yacs pthflops imagesize pydantic pytest \
+    scipy pyyaml opencv-python thop pandas seaborn
+
+# Install ymir-exc sdk
+RUN pip install ymir-exc
+
+# Copy file from host to docker
+ADD ./det-yolov5-tmi /app
+RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
+
+# Download pretrained weight and font file
+RUN cd /app && bash data/scripts/download_weights.sh
+RUN mkdir -p /root/.config/Ultralytics && \
+    wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf
+
+# Make PYTHONPATH find local package
+ENV PYTHONPATH=.
+
+WORKDIR /app
+CMD python3 /app/start.py
\ No newline at end of file
diff --git a/det-yolov5-tmi/cuda111-devel.dockerfile b/det-yolov5-tmi/cuda111-devel.dockerfile
new file mode 100644
index 0000000..6378b8b
--- /dev/null
+++ b/det-yolov5-tmi/cuda111-devel.dockerfile
@@ -0,0 +1,43 @@
+ARG PYTORCH="1.8.0"
+ARG CUDA="11.1"
+ARG CUDNN="8"
+
+# cuda11.1 + pytorch1.9.0 + cudnn8 does not work!
+FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
+
+ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
+ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
+ENV LANG=C.UTF-8
+
+# Install linux package
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \
+    apt-get update && apt-get install -y gnupg2 git ninja-build libglib2.0-0 libsm6 \
+    libxrender-dev libxext6 libgl1-mesa-glx ffmpeg sudo openssh-server \
+    libyaml-dev vim tmux tree curl wget zip \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install python package
+RUN pip install -U pip && \
+    pip install cython xtcocotools jupyter onnx onnx-simplifier loguru \
+    tensorboard==2.5.0 numba progress yacs pthflops pytest \
+    scipy pydantic pyyaml imagesize opencv-python thop pandas seaborn
+
+# Install ymir-exc sdk
+RUN pip install ymir-exc
+
+# Copy file from host to docker
+ADD ./det-yolov5-tmi /app
+RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
+
+# Download pretrained weight and font file
+RUN cd /app && bash data/scripts/download_weights.sh
+RUN mkdir -p /root/.config/Ultralytics && \
+    wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf
+
+# setup PYTHONPATH to find local package
+ENV PYTHONPATH=.
+
+WORKDIR /app
+CMD python3 /app/start.py
\ No newline at end of file
diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile
new file mode 100644
index 0000000..4b637ec
--- /dev/null
+++ b/det-yolov5-tmi/cuda111.dockerfile
@@ -0,0 +1,42 @@
+ARG PYTORCH="1.8.0"
+ARG CUDA="11.1"
+ARG CUDNN="8"
+
+# cuda11.1 + pytorch1.9.0 + cudnn8 does not work!
+FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime
+
+ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
+ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
+ENV LANG=C.UTF-8
+
+# Install linux package
+RUN apt-get update && apt-get install -y gnupg2 git ninja-build libglib2.0-0 libsm6 \
+    libxrender-dev libxext6 libgl1-mesa-glx ffmpeg sudo openssh-server \
+    libyaml-dev vim tmux tree curl wget zip \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install python package
+RUN pip install -U pip && \
+    pip install cython xtcocotools onnx onnx-simplifier loguru \
+    tensorboard==2.5.0 numba progress yacs pthflops imagesize pydantic pytest \
+    scipy pyyaml opencv-python thop pandas seaborn
+
+# Install ymir-exc sdk
+RUN pip install ymir-exc
+
+# Copy file from host to docker
+ADD ./det-yolov5-tmi /app
+RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
+
+# Download pretrained weight and font file
+RUN cd /app && bash data/scripts/download_weights.sh
+RUN mkdir -p /root/.config/Ultralytics && \
+    wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf
+
+# Make PYTHONPATH find local package
+ENV PYTHONPATH=.
+
+WORKDIR /app
+CMD python3 /app/start.py
\ No newline at end of file
diff --git a/det-yolov5-tmi/infer-template.yaml b/det-yolov5-tmi/infer-template.yaml
new file mode 100644
index 0000000..7574512
--- /dev/null
+++ b/det-yolov5-tmi/infer-template.yaml
@@ -0,0 +1,12 @@
+# infer template for your executor app
+# after building the image, it should be at /img-man/infer-template.yaml
+# the keys gpu_id, task_id, model_params_path and class_names should be preserved
+
+gpu_id: '0'
+task_id: 'default-infer-task'
+model_params_path: []
+class_names: []
+
+img_size: 640
+conf_thres: 0.25
+iou_thres: 0.45
diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml
new file mode 100644
index 0000000..5f2a3b2
--- /dev/null
+++ b/det-yolov5-tmi/mining-template.yaml
@@ -0,0 +1,12 @@
+# mining template for your executor app
+# after building the image, it should be at /img-man/mining-template.yaml
+# the keys gpu_id, task_id, model_params_path and class_names should be preserved
+
+gpu_id: '0'
+task_id: 'default-mining-task'
+model_params_path: []
+class_names: []
+
+img_size: 640
+conf_thres: 0.25
+iou_thres: 0.45
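These templates become the executor's hyperparameter source at runtime: ymir merges them with the task information and the executor reads the result through the ymir-exc SDK. A minimal sketch of that read path, mirroring `get_merged_config()` from `utils/ymir_yolov5.py` later in this patch (the printed keys are the template keys above; anything beyond them depends on the task configuration):

```python
from easydict import EasyDict as edict
from ymir_exc import env


def load_task_config() -> edict:
    cfg = edict()
    cfg.param = env.get_executor_config()  # hyperparameters from *-template.yaml
    cfg.ymir = env.get_current_env()       # input/output paths, task type flags
    return cfg


if __name__ == '__main__':
    cfg = load_task_config()
    # gpu_id and task_id are reserved keys filled in by ymir
    print(cfg.param.gpu_id, cfg.param.task_id)
    print(cfg.param.img_size, cfg.param.conf_thres, cfg.param.iou_thres)
```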
diff --git a/det-yolov5-tmi/mining/data_augment.py b/det-yolov5-tmi/mining/data_augment.py
new file mode 100644
index 0000000..47b1d50
--- /dev/null
+++ b/det-yolov5-tmi/mining/data_augment.py
@@ -0,0 +1,198 @@
+"""
+data augmentations for the CALD method, including horizontal_flip, rotate (5 degrees) and cutout
+official code: https://github.com/we1pingyu/CALD/blob/master/cald/cald_helper.py
+"""
+import random
+from typing import Any, List, Tuple
+
+import cv2
+import numpy as np
+from nptyping import NDArray
+
+from utils.ymir_yolov5 import BBOX, CV_IMAGE
+
+
+def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray:
+    '''
+    Find the intersection of every box combination between two sets of boxes
+    boxes1: bounding boxes 1, an array of shape (n1, 4)
+    boxes2: bounding boxes 2, an array of shape (n2, 4)
+
+    Out: intersection area of each box in boxes1 with each box in boxes2,
+    an array of shape (n1, n2)
+    '''
+    n1 = boxes1.shape[0]
+    n2 = boxes2.shape[0]
+    max_xy = np.minimum(np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1),
+                        np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0))
+
+    min_xy = np.maximum(np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1),
+                        np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0))
+    inter = np.clip(max_xy - min_xy, a_min=0, a_max=None)  # (n1, n2, 2)
+    return inter[:, :, 0] * inter[:, :, 1]  # (n1, n2)
+
+
+def horizontal_flip(image: CV_IMAGE, bbox: BBOX) -> Tuple[CV_IMAGE, BBOX]:
+    """
+    image: opencv image, [height, width, channels]
+    bbox: numpy.ndarray, [N, 4] --> [x1, y1, x2, y2]
+    """
+    image = image.copy()
+
+    width = image.shape[1]
+    # Flip image horizontally
+    image = image[:, ::-1, :]
+    if len(bbox) > 0:
+        bbox = bbox.copy()
+        # Flip bbox horizontally
+        bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
+    return image, bbox
+
+
+def cutout(image: CV_IMAGE, bbox: BBOX, cut_num: int = 2, fill_val: int = 0,
+           bbox_remove_thres: float = 0.4, bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]:
+    '''
+    Cutout augmentation
+    image: an opencv image
+    bbox: bounding boxes, an array of dimensions (#objects, 4)
+    cut_num: number of cutout regions to apply
+    fill_val: value filled in the cut out region
+    bbox_remove_thres: threshold to remove a bbox cut by cutout
+    bbox_min_thres: minimum overlap ratio for a cutout to count
+
+    Out: new image, new boxes
+    '''
+    image = image.copy()
+    bbox = bbox.copy()
+
+    if len(bbox) == 0:
+        return image, bbox
+
+    original_h, original_w, original_channel = image.shape
+    count = 0
+    for _ in range(50):
+        # Random cutout size: [0.05, 0.2] of original dimension
+        cutout_size_h = random.uniform(0.05 * original_h, 0.2 * original_h)
+        cutout_size_w = random.uniform(0.05 * original_w, 0.2 * original_w)
+
+        # Random position for cutout
+        left = random.uniform(0, original_w - cutout_size_w)
+        right = left + cutout_size_w
+        top = random.uniform(0, original_h - cutout_size_h)
+        bottom = top + cutout_size_h
+        cutout_box = np.array([[float(left), float(top), float(right), float(bottom)]])
+
+        # Calculate the overlap between the cutout region and the bounding boxes
+        overlap_size = intersect(cutout_box, bbox)
+        area_boxes = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])
+        ratio = overlap_size / (area_boxes + 1e-14)
+        # If the max overlap ratio is above bbox_remove_thres or below bbox_min_thres, try again
+        if ratio.max() > bbox_remove_thres or ratio.max() < bbox_min_thres:
+            continue
+
+        image[int(top):int(bottom), int(left):int(right), :] = fill_val
+        count += 1
+        if count >= cut_num:
+            break
+    return image, bbox
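Of these transforms, horizontal_flip is the only exactly invertible one, which makes it a convenient sanity check: flipping twice must return the original image and boxes. A minimal self-check sketch with dummy data (assuming the repo root is on PYTHONPATH, as the dockerfiles set):

```python
import numpy as np
from mining.data_augment import horizontal_flip

# hypothetical 4x6 image and one box, just to exercise the transform
img = np.arange(4 * 6 * 3, dtype=np.uint8).reshape(4, 6, 3)
boxes = np.array([[1.0, 0.0, 3.0, 2.0]])

flipped_img, flipped_boxes = horizontal_flip(img, boxes)
restored_img, restored_boxes = horizontal_flip(flipped_img, flipped_boxes)

assert np.array_equal(restored_img, img)   # flip twice -> identity
assert np.allclose(restored_boxes, boxes)  # boxes map back too
# the flipped box: x1' = width - x2, x2' = width - x1
assert np.allclose(flipped_boxes, [[3.0, 0.0, 5.0, 2.0]])
```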
+
+
+def rotate(image: CV_IMAGE, bbox: BBOX, rot: float = 5) -> Tuple[CV_IMAGE, BBOX]:
+    image = image.copy()
+    bbox = bbox.copy()
+    h, w, c = image.shape
+    center = np.array([w / 2.0, h / 2.0])
+    s = max(h, w) * 1.0
+    trans = get_affine_transform(center, s, rot, [w, h])
+    if len(bbox) > 0:
+        for i in range(bbox.shape[0]):
+            x1, y1 = affine_transform(bbox[i, :2], trans)
+            x2, y2 = affine_transform(bbox[i, 2:], trans)
+            x3, y3 = affine_transform(bbox[i, [2, 1]], trans)
+            x4, y4 = affine_transform(bbox[i, [0, 3]], trans)
+            bbox[i, :2] = [min(x1, x2, x3, x4), min(y1, y2, y3, y4)]
+            bbox[i, 2:] = [max(x1, x2, x3, x4), max(y1, y2, y3, y4)]
+    image = cv2.warpAffine(image, trans, (w, h), flags=cv2.INTER_LINEAR)
+    return image, bbox
+
+
+def get_3rd_point(a: NDArray, b: NDArray) -> NDArray:
+    direct = a - b
+    return b + np.array([-direct[1], direct[0]], dtype=np.float32)
+
+
+def get_dir(src_point: NDArray, rot_rad: float) -> List:
+    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
+
+    src_result = [0, 0]
+    src_result[0] = src_point[0] * cs - src_point[1] * sn
+    src_result[1] = src_point[0] * sn + src_point[1] * cs
+
+    return src_result
+
+
+def transform_preds(coords: NDArray, center: NDArray, scale: Any, rot: float, output_size: List) -> NDArray:
+    trans = get_affine_transform(center, scale, rot, output_size, inv=True)
+    target_coords = affine_transform(coords, trans)
+    return target_coords
+
+
+def get_affine_transform(center: NDArray,
+                         scale: Any,
+                         rot: float,
+                         output_size: List,
+                         shift: NDArray = np.array([0, 0], dtype=np.float32),
+                         inv: bool = False) -> NDArray:
+    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
+        scale = np.array([scale, scale], dtype=np.float32)
+
+    scale_tmp = scale
+    src_w = scale_tmp[0]
+    dst_w = output_size[0]
+    dst_h = output_size[1]
+
+    rot_rad = np.pi * rot / 180
+    src_dir = get_dir([0, src_w * -0.5], rot_rad)
+    dst_dir = np.array([0, dst_w * -0.5], np.float32)
+
+    src = np.zeros((3, 2), dtype=np.float32)
+    dst = np.zeros((3, 2), dtype=np.float32)
+    src[0, :] = center + scale_tmp * shift
+    src[1, :] = center + src_dir + scale_tmp * shift
+    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
+
+    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
+    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
+
+    if inv:
+        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+    else:
+        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+    return trans
+
+
+def affine_transform(pt: NDArray, t: NDArray) -> NDArray:
+    new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
+    new_pt = np.dot(t, new_pt)
+    return new_pt[:2]
+
+
+def resize(img: CV_IMAGE, boxes: BBOX, ratio: float = 0.8) -> Tuple[CV_IMAGE, BBOX]:
+    """
+    ratio: must be <= 1.0
+    """
+    assert ratio <= 1.0, f'resize ratio {ratio} must be <= 1.0'
+
+    h, w, _ = img.shape
+    ow = int(w * ratio)
+    oh = int(h * ratio)
+    resize_img = cv2.resize(img, (ow, oh))
+    new_img = np.zeros_like(img)
+    new_img[:oh, :ow] = resize_img
+
+    if len(boxes) == 0:
+        return new_img, boxes
+    else:
+        return new_img, boxes * ratio
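mining_cald.py below builds its pairwise IoU (get_ious) on top of intersect. A quick numeric check with hand-computed values (again assuming the repo root is on PYTHONPATH):

```python
import numpy as np
from mining.data_augment import intersect

boxes1 = np.array([[0.0, 0.0, 10.0, 10.0]])  # area 100
boxes2 = np.array([[5.0, 5.0, 15.0, 15.0]])  # area 100, overlap is 5x5 = 25

inter = intersect(boxes1, boxes2)            # shape (1, 1)
assert inter[0, 0] == 25.0

iou = inter / (100.0 + 100.0 - inter)        # IoU = 25 / 175
assert np.isclose(iou[0, 0], 1.0 / 7.0)
```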
diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py
new file mode 100644
index 0000000..77bfcf6
--- /dev/null
+++ b/det-yolov5-tmi/mining/mining_cald.py
@@ -0,0 +1,144 @@
+"""
+Consistency-based Active Learning for Object Detection, CVPR 2022 workshop
+official code: https://github.com/we1pingyu/CALD/blob/master/cald_train.py
+"""
+import sys
+from typing import Dict, List, Tuple
+
+import cv2
+import numpy as np
+from nptyping import NDArray
+from scipy.stats import entropy
+from tqdm import tqdm
+from ymir_exc import dataset_reader as dr
+from ymir_exc import env, monitor
+from ymir_exc import result_writer as rw
+
+from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate
+from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5, YmirStage, get_merged_config, get_ymir_process
+
+
+def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]:
+    if len(result) > 0:
+        bboxes = result[:, :4].astype(np.int32)
+        conf = result[:, 4]
+        class_id = result[:, 5]
+    else:
+        bboxes = np.zeros(shape=(0, 4), dtype=np.int32)
+        conf = np.zeros(shape=(0, 1), dtype=np.float32)
+        class_id = np.zeros(shape=(0, 1), dtype=np.int32)
+
+    return bboxes, conf, class_id
+
+
+class MiningCald(YmirYolov5):
+    def mining(self) -> List:
+        N = dr.items_count(env.DatasetType.CANDIDATE)
+        monitor_gap = max(1, N // 100)
+        idx = -1
+        beta = 1.3
+        mining_result = []
+        for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)):
+            img = cv2.imread(asset_path)
+            # xyxy, conf, cls
+            result = self.predict(img)
+            bboxes, conf, _ = split_result(result)
+            if len(result) == 0:
+                # no predictions for the unaugmented image
+                mining_result.append((asset_path, -beta))
+                continue
+
+            consistency = 0.0
+            aug_bboxes_dict, aug_results_dict = self.aug_predict(img, bboxes)
+            for key in aug_results_dict:
+                # no predictions for the image under augmentation {key}
+                if len(aug_results_dict[key]) == 0:
+                    consistency += beta
+                    continue
+
+                bboxes_key, conf_key, _ = split_result(aug_results_dict[key])
+                cls_scores_aug = 1 - conf_key
+                cls_scores = 1 - conf
+
+                consistency_per_aug = 2.0
+                ious = get_ious(bboxes_key, aug_bboxes_dict[key])
+                aug_idxs = np.argmax(ious, axis=0)
+                for origin_idx, aug_idx in enumerate(aug_idxs):
+                    max_iou = ious[aug_idx, origin_idx]
+                    if max_iou == 0:
+                        consistency_per_aug = min(consistency_per_aug, beta)
+                    p = cls_scores_aug[aug_idx]
+                    q = cls_scores[origin_idx]
+                    m = (p + q) / 2.
+                    js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m)
+                    if js < 0:
+                        js = 0
+                    consistency_box = max_iou
+                    consistency_cls = 0.5 * (conf[origin_idx] + conf_key[aug_idx]) * (1 - js)
+                    consistency_per_inst = abs(consistency_box + consistency_cls - beta)
+                    consistency_per_aug = min(consistency_per_aug, consistency_per_inst.item())
+
+                consistency += consistency_per_aug
+
+            consistency /= len(aug_results_dict)
+
+            mining_result.append((asset_path, consistency))
+            idx += 1
+
+            if idx % monitor_gap == 0:
+                percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N)
+                monitor.write_monitor_logger(percent=percent)
+
+        return mining_result
+
+    def aug_predict(self, image: CV_IMAGE, bboxes: BBOX) -> Tuple[Dict[str, BBOX], Dict[str, NDArray]]:
+        """
+        for each augmentation method (flip, cutout, rotate and resize):
+        augment the image and bbox, then run the model on the augmented image.
+
+        return the augmented bboxes and the prediction results.
+        """
+        aug_dict = dict(flip=horizontal_flip,
+                        cutout=cutout,
+                        rotate=rotate,
+                        resize=resize)
+
+        aug_bboxes = dict()
+        aug_results = dict()
+        for key in aug_dict:
+            aug_img, aug_bbox = aug_dict[key](image, bboxes)
+
+            aug_result = self.predict(aug_img)
+            aug_bboxes[key] = aug_bbox
+            aug_results[key] = aug_result
+
+        return aug_bboxes, aug_results
+
+
+def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray:
+    """
+    args:
+        boxes1: np.array, (N, 4), xyxy
+        boxes2: np.array, (M, 4), xyxy
+    return:
+        iou: np.array, (N, M)
+    """
+    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
+    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
+    inter_area = intersect(boxes1, boxes2)
+    area1 = area1.reshape(-1, 1).repeat(area2.shape[0], axis=1)
+    area2 = area2.reshape(1, -1).repeat(area1.shape[0], axis=0)
+    iou = inter_area / (area1 + area2 - inter_area + 1e-14)
+    return iou
+
+
+def main():
+    # YmirYolov5.__init__ requires the merged config
+    miner = MiningCald(get_merged_config())
+    mining_result = miner.mining()
+    rw.write_mining_result(mining_result=mining_result)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
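The per-instance score above combines a box term (IoU) with a classification term built on Jensen-Shannon divergence. A standalone sketch of that formula using two-class distributions (confidence vs. background) — an assumption made explicit here, since the patch feeds scalar confidences straight to scipy's entropy:

```python
import numpy as np
from scipy.stats import entropy


def consistency_score(iou: float, conf_a: float, conf_b: float, beta: float = 1.3) -> float:
    """CALD-style per-instance consistency: box term + class term, centered on beta."""
    p = np.array([conf_a, 1.0 - conf_a])  # treat confidence as a Bernoulli distribution
    q = np.array([conf_b, 1.0 - conf_b])
    m = (p + q) / 2.0
    js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m)  # Jensen-Shannon divergence
    cls_term = 0.5 * (conf_a + conf_b) * (1.0 - js)
    return abs(iou + cls_term - beta)


# identical confident predictions with perfect box agreement: |1.0 + 0.9 - 1.3| = 0.6
print(consistency_score(iou=1.0, conf_a=0.9, conf_b=0.9))
```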
diff --git a/det-yolov5-tmi/mypy.ini b/det-yolov5-tmi/mypy.ini
new file mode 100644
index 0000000..85e751a
--- /dev/null
+++ b/det-yolov5-tmi/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+ignore_missing_imports = True
+disallow_untyped_defs = False
+files = mining/*.py, utils/ymir_yolov5.py, start.py, train.py
+exclude = utils/general\.py
+
+[mypy-torch.*]
+ignore_errors = True
diff --git a/det-yolov5-tmi/requirements.txt b/det-yolov5-tmi/requirements.txt
index 96fc9d1..3e65c34 100755
--- a/det-yolov5-tmi/requirements.txt
+++ b/det-yolov5-tmi/requirements.txt
@@ -22,8 +22,8 @@ seaborn>=0.11.0
 
 # Export --------------------------------------
 # coremltools>=4.1  # CoreML export
-# onnx>=1.9.0  # ONNX export
-# onnx-simplifier>=0.3.6  # ONNX simplifier
+onnx>=1.9.0  # ONNX export
+onnx-simplifier>=0.3.6  # ONNX simplifier
 # scikit-learn==0.19.2  # CoreML quantization
 # tensorflow>=2.4.1  # TFLite export
 # tensorflowjs>=3.9.0  # TF.js export
@@ -35,3 +35,8 @@
 # pycocotools>=2.0  # COCO mAP
 # roboflow
 thop  # FLOPs computation
+
+# Ymir ---------------------------------------
+imagesize  # read image size quickly without loading the whole image
+nptyping  # numpy type hints
+easydict
\ No newline at end of file
diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py
new file mode 100644
index 0000000..d22b3b8
--- /dev/null
+++ b/det-yolov5-tmi/start.py
@@ -0,0 +1,128 @@
+import logging
+import os
+import os.path as osp
+import shutil
+import subprocess
+import sys
+
+import cv2
+from easydict import EasyDict as edict
+from ymir_exc import dataset_reader as dr
+from ymir_exc import env, monitor
+from ymir_exc import result_writer as rw
+
+from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, download_weight_file, get_merged_config,
+                               get_weight_file, get_ymir_process)
+
+
+def start() -> int:
+    cfg = get_merged_config()
+
+    logging.info(f'merged config: {cfg}')
+
+    if cfg.ymir.run_training:
+        _run_training(cfg)
+    elif cfg.ymir.run_mining:
+        _run_mining(cfg)
+    elif cfg.ymir.run_infer:
+        _run_infer(cfg)
+    else:
+        logging.warning('no task running')
+
+    return 0
+
+
+def _run_training(cfg: edict) -> None:
+    """
+    function for the training task
+    1. convert dataset
+    2. train the model
+    3. save model weights/hyperparameters/... to the designated directory
+    """
+    # 1. convert dataset
+    logging.info('convert ymir dataset to yolov5 dataset')
+    out_dir = cfg.ymir.output.root_dir
+    convert_ymir_to_yolov5(cfg, out_dir)
+    monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0))
+
+    # 2. train the model
+    epochs = cfg.param.epochs
+    batch_size = cfg.param.batch_size
+    model = cfg.param.model
+    img_size = cfg.param.img_size
+    weights = get_weight_file(cfg)
+    if not weights:
+        # download pretrained weight
+        weights = download_weight_file(model)
+
+    models_dir = cfg.ymir.output.models_dir
+    command = f'python3 train.py --epochs {epochs} ' + \
+        f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \
+        f'--cfg models/{model}.yaml --name models --weights {weights} ' + \
+        f'--img-size {img_size} --hyp data/hyps/hyp.scratch-low.yaml ' + \
+        '--exist-ok'
+    logging.info(f'start training: {command}')
+
+    subprocess.run(command.split(), check=True)
+    monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0))
+
+    # 3. convert to onnx and save model weights to the designated directory
+    opset = cfg.param.opset
+    command = f'python3 export.py --weights {models_dir}/best.pt --opset {opset} --include onnx'
+    logging.info(f'export onnx weight: {command}')
+    subprocess.run(command.split(), check=True)
+
+    # save hyperparameters
+    shutil.copy(f'models/{model}.yaml', f'{models_dir}/{model}.yaml')
+
+    # the task is done, write the 100% percent log
+    monitor.write_monitor_logger(percent=1.0)
+
+
+def _run_mining(cfg: edict) -> None:
+    logging.info('convert ymir dataset to yolov5 dataset')
+    out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset')
+    convert_ymir_to_yolov5(cfg, out_dir)
+    monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0))
+
+    command = 'python3 mining/mining_cald.py'
+    logging.info(f'mining: {command}')
+    subprocess.run(command.split(), check=True)
+    monitor.write_monitor_logger(percent=1.0)
+
+
+def _run_infer(cfg: edict) -> None:
+    # generate data.yaml for infer
+    logging.info('convert ymir dataset to yolov5 dataset')
+    out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset')
+    convert_ymir_to_yolov5(cfg, out_dir)
+    monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0))
+
+    N = dr.items_count(env.DatasetType.CANDIDATE)
+    infer_result = dict()
+    model = YmirYolov5(cfg)
+    idx = -1
+
+    monitor_gap = max(1, N // 100)
+    for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE):
+        img = cv2.imread(asset_path)
+        result = model.infer(img)
+        infer_result[asset_path] = result
+        idx += 1
+
+        if idx % monitor_gap == 0:
+            percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N)
+            monitor.write_monitor_logger(percent=percent)
+
+    rw.write_infer_result(infer_result=infer_result)
+    monitor.write_monitor_logger(percent=1.0)
+
+
+if __name__ == '__main__':
+    logging.basicConfig(stream=sys.stdout,
+                        format='%(levelname)-8s: [%(asctime)s] %(message)s',
+                        datefmt='%Y%m%d-%H:%M:%S',
+                        level=logging.INFO)
+
+    os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python')
+    sys.exit(start())
diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py
index d8df31b..6dd190e 100644
--- a/det-yolov5-tmi/train.py
+++ b/det-yolov5-tmi/train.py
@@ -47,7 +47,7 @@ from utils.datasets import create_dataloader
 from utils.downloads import attempt_download
 from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
-                           check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
+                           check_suffix, check_version, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
                            intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle,
                            print_args, print_mutation, strip_optimizer)
 from utils.loggers import Loggers
@@ -56,6 +56,8 @@ from utils.metrics import fitness
 from utils.plots import plot_evolve, plot_labels
 from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first
+from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config
+from ymir_exc import monitor
 
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
@@ -70,9 +72,12 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
         opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
+    ymir_cfg = opt.ymir_cfg
+    opt.ymir_cfg = ''  # yaml cannot dump an edict, remove it here
+    log_dir = Path(ymir_cfg.ymir.output.tensorboard_dir)
 
     # Directories
-    w = save_dir / 'weights'  # weights dir
+    w = save_dir  # weights dir
     (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
     last, best = w / 'last.pt', w / 'best.pt'
@@ -92,7 +97,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # Loggers
     data_dict = None
     if RANK in [-1, 0]:
-        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
+        loggers = Loggers(log_dir, weights, opt, hyp, LOGGER)  # loggers instance
         if loggers.wandb:
             data_dict = loggers.wandb.data_dict
             if resume:
@@ -253,7 +258,10 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
 
     # DDP mode
     if cuda and RANK != -1:
-        model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
+        if check_version(torch.__version__, '1.11.0'):
+            model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
+        else:
+            model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
 
     # Model attributes
     nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
@@ -281,9 +289,16 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
                 f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                 f"Logging results to {colorstr('bold', save_dir)}\n"
                 f'Starting training for {epochs} epochs...')
+
+    monitor_gap = max(1, (epochs - start_epoch + 1) // 100)
     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
         model.train()
+        # ymir monitor
+        if epoch % monitor_gap == 0:
+            percent = get_ymir_process(stage=YmirStage.TASK, p=epoch / (epochs - start_epoch + 1))
+            monitor.write_monitor_logger(percent=percent)
+
         # Update image weights (optional, single-GPU only)
         if opt.image_weights:
             cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
@@ -398,8 +413,10 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
 
             # Save last, best and delete
             torch.save(ckpt, last)
+            write_ymir_training_result(ymir_cfg, results, maps, rewrite=False)
             if best_fitness == fi:
                 torch.save(ckpt, best)
+                write_ymir_training_result(ymir_cfg, results, maps, rewrite=True)
             if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
                 torch.save(ckpt, w / f'epoch{epoch}.pt')
             del ckpt
@@ -518,8 +535,10 @@ def main(opt, callbacks=Callbacks()):
     if opt.evolve:
         if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
             opt.project = str(ROOT / 'runs/evolve')
-        opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
     opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
+    ymir_cfg = get_merged_config()
+    opt.ymir_cfg = ymir_cfg
+
     # DDP mode
     device = select_device(opt.device, batch_size=opt.batch_size)
diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml
new file mode 100644
index 0000000..8cacec8
--- /dev/null
+++ b/det-yolov5-tmi/training-template.yaml
@@ -0,0 +1,14 @@
+# training template for your executor app
+# after building the image, it should be at /img-man/training-template.yaml
+# the keys gpu_id, task_id, pretrained_model_paths and class_names should be preserved
+
+gpu_id: '0'
+task_id: 'default-training-task'
+pretrained_model_paths: []
+class_names: []
+
+model: 'yolov5s'
+batch_size: 16
+epochs: 300
+img_size: 640
+opset: 11
diff --git a/det-yolov5-tmi/utils/datasets.py b/det-yolov5-tmi/utils/datasets.py
index e132e04..cb36851 100755
--- a/det-yolov5-tmi/utils/datasets.py
+++ b/det-yolov5-tmi/utils/datasets.py
@@ -18,6 +18,7 @@ from zipfile import ZipFile
 
 import cv2
+import imagesize
 import numpy as np
 import torch
 import torch.nn.functional as F
@@ -28,9 +29,9 @@
 from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
 from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
-                           segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
+                           segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, ymir_xyxy2xywh)
 from utils.torch_utils import torch_distributed_zero_first
-
+from loguru import logger
 # Parameters
 HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
 IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp']  # include image suffixes
@@ -369,10 +370,8 @@ def __len__(self):
         return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
 
-def img2label_paths(img_paths):
-    # Define label paths as a function of image paths
-    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
-    return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
+def img2label_paths(img_paths, img2label_map={}):
+    return [img2label_map[img] for img in img_paths]
 
 
 class LoadImagesAndLabels(Dataset):
@@ -394,19 +393,19 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
         try:
             f = []  # image files
+            img2label_map = dict()  # map image files to label files
             for p in path if isinstance(path, list) else [path]:
                 p = Path(p)  # os-agnostic
-                if p.is_dir():  # dir
-                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
-                    # f = list(p.rglob('*.*'))  # pathlib
-                elif p.is_file():  # file
+                if p.is_file():  # ymir index file
                     with open(p) as t:
                         t = t.read().strip().splitlines()
-                        parent = str(p.parent) + os.sep
-                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
-                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
+                        for x in t:
+                            # x = f'{image_path}\t{label_path}\n'
+                            image_path, label_path = x.split()
+                            f.append(image_path)
+                            img2label_map[image_path] = label_path
                 else:
-                    raise Exception(f'{prefix}{p} does not exist')
+                    raise Exception(f'{prefix}{p} is not a valid ymir index file')
             self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
             # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib
             assert self.img_files, f'{prefix}No images found'
@@ -414,7 +413,7 @@
             raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
 
         # Check cache
-        self.label_files = img2label_paths(self.img_files)  # labels
+        self.label_files = img2label_paths(self.img_files, img2label_map)  # labels
         cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
         try:
             cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
@@ -438,7 +437,7 @@
         self.labels = list(labels)
         self.shapes = np.array(shapes, dtype=np.float64)
         self.img_files = list(cache.keys())  # update
-        self.label_files = img2label_paths(cache.keys())  # update
+        self.label_files = img2label_paths(cache.keys(), img2label_map)  # update
         n = len(shapes)  # number of images
         bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
         nb = bi[-1] + 1  # number of batches
@@ -841,7 +840,7 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.datasets import
             lb_file = Path(img2label_paths([str(im_file)])[0])
             if Path(lb_file).exists():
                 with open(lb_file) as f:
-                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
+                    lb = np.array([x.split(',') for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
 
                 for j, x in enumerate(lb):
                     c = int(x[0])  # class
@@ -905,14 +904,16 @@ def verify_image_label(args):
         if os.path.isfile(lb_file):
             nf = 1  # label found
             with open(lb_file) as f:
-                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
-                if any([len(x) > 8 for x in lb]):  # is segment
-                    classes = np.array([x[0] for x in lb], dtype=np.float32)
-                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb]  # (cls, xy1...)
-                    lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
-                lb = np.array(lb, dtype=np.float32)
+                lb = [x.split(',') for x in f.read().strip().splitlines() if len(x)]
+
             nl = len(lb)
             if nl:
+                classes = np.array([x[0] for x in lb], dtype=np.float32)
+                width, height = imagesize.get(im_file)
+                ymir_xyxy = np.array([x[1:] for x in lb], dtype=np.float32)
+                lb = np.concatenate(
+                    (classes.reshape(-1, 1), ymir_xyxy2xywh(ymir_xyxy, width, height)), 1)  # (cls, xywh)
+
                 assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
                 assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
                 assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
diff --git a/det-yolov5-tmi/utils/general.py b/det-yolov5-tmi/utils/general.py
index 3044b9c..a2a1971 100755
--- a/det-yolov5-tmi/utils/general.py
+++ b/det-yolov5-tmi/utils/general.py
@@ -578,6 +578,10 @@ def xyxy2xywh(x):
     y[:, 3] = x[:, 3] - x[:, 1]  # height
     return y
 
+def ymir_xyxy2xywh(x, width, height):
+    x[:, 0:3:2] /= width   # normalize x1, x2
+    x[:, 1:4:2] /= height  # normalize y1, y2
+    return xyxy2xywh(x)
 
 def xywh2xyxy(x):
     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
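ymir labels are comma-separated `class,x1,y1,x2,y2` rows in pixels; verify_image_label turns them into the normalized `(cls, cx, cy, w, h)` rows yolov5 expects via ymir_xyxy2xywh above. A hand-checked sketch of the same arithmetic, inlined with plain numpy so it can run standalone:

```python
import numpy as np

line = '0,100,100,300,200'    # ymir label: class,x1,y1,x2,y2 in pixels
width, height = 400.0, 400.0  # image size, e.g. from imagesize.get(...)

cls, *xyxy = np.array(line.split(','), dtype=np.float32)
x1, y1, x2, y2 = [v / s for v, s in zip(xyxy, (width, height, width, height))]

# same result as ymir_xyxy2xywh: normalized center-x, center-y, w, h
xywh = [(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1]
print(cls, xywh)  # 0.0 [0.5, 0.375, 0.5, 0.25]
```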
diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py
new file mode 100644
index 0000000..64ce9be
--- /dev/null
+++ b/det-yolov5-tmi/utils/ymir_yolov5.py
@@ -0,0 +1,230 @@
+"""
+utility functions for ymir and yolov5
+"""
+import os.path as osp
+from enum import IntEnum
+from typing import Any, List, Tuple
+
+import numpy as np
+import torch
+import yaml
+from easydict import EasyDict as edict
+from nptyping import NDArray, Shape, UInt8
+from ymir_exc import env
+from ymir_exc import result_writer as rw
+
+from models.common import DetectMultiBackend
+from models.experimental import attempt_download
+from utils.augmentations import letterbox
+from utils.general import check_img_size, non_max_suppression, scale_coords
+from utils.torch_utils import select_device
+
+
+class YmirStage(IntEnum):
+    PREPROCESS = 1  # convert dataset
+    TASK = 2  # training/mining/infer
+    POSTPROCESS = 3  # export model
+
+
+BBOX = NDArray[Shape['*,4'], Any]
+CV_IMAGE = NDArray[Shape['*,*,3'], UInt8]
+
+
+def get_ymir_process(stage: YmirStage, p: float) -> float:
+    # constant percentages for the ymir process stages
+    PREPROCESS_PERCENT = 0.1
+    TASK_PERCENT = 0.8
+    POSTPROCESS_PERCENT = 0.1
+
+    if p < 0 or p > 1.0:
+        raise Exception(f'p not in [0, 1], p={p}')
+
+    if stage == YmirStage.PREPROCESS:
+        return PREPROCESS_PERCENT * p
+    elif stage == YmirStage.TASK:
+        return PREPROCESS_PERCENT + TASK_PERCENT * p
+    elif stage == YmirStage.POSTPROCESS:
+        return PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p
+    else:
+        raise NotImplementedError(f'unknown stage {stage}')
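get_ymir_process maps stage-local progress onto one global 0-1 scale: 10% for preprocessing, 80% for the task itself, 10% for postprocessing. A few hand-checked values (assuming the repo root is on PYTHONPATH; isclose is used because the stage offsets are floats):

```python
import math

from utils.ymir_yolov5 import YmirStage, get_ymir_process

assert math.isclose(get_ymir_process(YmirStage.PREPROCESS, p=1.0), 0.1)   # conversion done
assert math.isclose(get_ymir_process(YmirStage.TASK, p=0.5), 0.5)         # 0.1 + 0.8 * 0.5
assert math.isclose(get_ymir_process(YmirStage.POSTPROCESS, p=1.0), 1.0)  # everything done
```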
+
+
+def get_merged_config() -> edict:
+    """
+    merge ymir_config and executor_config
+    """
+    merged_cfg = edict()
+    # the hyperparameter information
+    merged_cfg.param = env.get_executor_config()
+
+    # the ymir path information
+    merged_cfg.ymir = env.get_current_env()
+    return merged_cfg
+
+
+def get_weight_file(cfg: edict, try_download: bool = True) -> str:
+    """
+    return the weight file path by priority
+
+    1. find the weight file in cfg.param.pretrained_model_paths (training)
+       or cfg.param.model_params_path (mining/infer)
+    2. if try_download and no weight file is offered for the training task,
+       yolov5 will download one from github.
+    """
+    if cfg.ymir.run_training:
+        model_params_path = cfg.param.pretrained_model_paths
+    else:
+        model_params_path = cfg.param.model_params_path
+
+    model_dir = osp.join(cfg.ymir.input.root_dir,
+                         cfg.ymir.input.models_dir)
+    model_params_path = [p for p in model_params_path if osp.exists(osp.join(model_dir, p))]
+
+    # choose the weight file by priority: best.pt > xxx.pt
+    if 'best.pt' in model_params_path:
+        return osp.join(model_dir, 'best.pt')
+    else:
+        for f in model_params_path:
+            if f.endswith('.pt'):
+                return osp.join(model_dir, f)
+
+    return ""
+
+
+def download_weight_file(model_name: str) -> str:
+    weights = attempt_download(f'{model_name}.pt')
+    return weights
+
+
+class YmirYolov5():
+    """
+    used for mining and inference to init the detector and predict.
+    """
+
+    def __init__(self, cfg: edict):
+        self.cfg = cfg
+        device = select_device(cfg.param.get('gpu_id', 'cpu'))
+
+        self.model = self.init_detector(device)
+        self.device = device
+        self.class_names = cfg.param.class_names
+        self.stride = self.model.stride
+        self.conf_thres = float(cfg.param.conf_thres)
+        self.iou_thres = float(cfg.param.iou_thres)
+
+        img_size = int(cfg.param.img_size)
+        imgsz = (img_size, img_size)
+        imgsz = check_img_size(imgsz, s=self.stride)
+
+        self.model.warmup(imgsz=(1, 3, *imgsz), half=False)  # warmup
+        self.img_size = imgsz
+
+    def init_detector(self, device: torch.device) -> DetectMultiBackend:
+        weights = get_weight_file(self.cfg)
+
+        model = DetectMultiBackend(weights=weights,
+                                   device=device,
+                                   dnn=False,  # do not use opencv dnn for onnx inference
+                                   data='data.yaml')  # dataset.yaml path
+
+        return model
+
+    def predict(self, img: CV_IMAGE) -> NDArray:
+        """
+        predict on a single image and return the bbox information
+        img: opencv BGR, uint8 format
+        """
+        # preprocess: padded resize
+        img1 = letterbox(img, self.img_size, stride=self.stride, auto=True)[0]
+
+        # preprocess: convert data format
+        img1 = img1.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
+        img1 = np.ascontiguousarray(img1)
+        img1 = torch.from_numpy(img1).to(self.device)
+
+        img1 = img1 / 255  # 0 - 255 to 0.0 - 1.0
+        img1.unsqueeze_(dim=0)  # expand for batch dim
+        pred = self.model(img1)
+
+        # postprocess
+        conf_thres = self.conf_thres
+        iou_thres = self.iou_thres
+        classes = None  # do not filter class_idx in results
+        agnostic_nms = False
+        max_det = 1000
+
+        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+
+        result = []
+        for det in pred:
+            if len(det):
+                # Rescale boxes from the inference size to the original image size
+                det[:, :4] = scale_coords(img1.shape[2:], det[:, :4], img.shape).round()
+                result.append(det)
+
+        # xyxy, conf, cls
+        if len(result) > 0:
+            tensor_result = torch.cat(result, dim=0)
+            numpy_result = tensor_result.data.cpu().numpy()
+        else:
+            numpy_result = np.zeros(shape=(0, 6), dtype=np.float32)
+
+        return numpy_result
+
+    def infer(self, img: CV_IMAGE) -> List[rw.Annotation]:
+        anns = []
+        result = self.predict(img)
+
+        for i in range(result.shape[0]):
+            xmin, ymin, xmax, ymax, conf, cls = result[i, :6].tolist()
+            ann = rw.Annotation(class_name=self.class_names[int(cls)], score=conf, box=rw.Box(
+                x=int(xmin), y=int(ymin), w=int(xmax - xmin), h=int(ymax - ymin)))
+
+            anns.append(ann)
+
+        return anns
+
+
+def convert_ymir_to_yolov5(cfg: edict, output_root_dir: str) -> None:
+    """
+    convert the ymir format dataset to yolov5 format:
+    generate data.yaml for training/mining/infer
+    output_root_dir: the output root dir
+    """
+    data = dict(path=cfg.ymir.input.root_dir,
+                train=cfg.ymir.input.training_index_file,
+                val=cfg.ymir.input.val_index_file,
+                test=cfg.ymir.input.candidate_index_file,
+                nc=len(cfg.param.class_names),
+                names=cfg.param.class_names)
+
+    with open(osp.join(output_root_dir, 'data.yaml'), 'w') as fw:
+        fw.write(yaml.safe_dump(data))
+
+
+def write_ymir_training_result(cfg: edict, results: Tuple, maps: NDArray, rewrite=False) -> int:
+    """
+    cfg: ymir config
+    results: (mp, mr, map50, map, loss)
+    maps: map@0.5:0.95 for all classes
+    rewrite: set True to overwrite an existing result with the best one
+    """
+    if not rewrite:
+        training_result_file = cfg.ymir.output.training_result_file
+        if osp.exists(training_result_file):
+            return 0
+
+    model = cfg.param.model
+    class_names = cfg.param.class_names
+    mp = results[0]  # mean of precision
+    mr = results[1]  # mean of recall
+    map50 = results[2]  # mean of ap@0.5
+    map = results[3]  # mean of ap@0.5:0.95
+
+    # use `rw.write_training_result` to save the training result
+    rw.write_training_result(model_names=[f'{model}.yaml', 'best.pt', 'last.pt', 'best.onnx'],
+                             mAP=float(map),
+                             mAP50=float(map50),
+                             precision=float(mp),
+                             recall=float(mr),
+                             classAPs={class_name: v
+                                       for class_name, v in zip(class_names, maps.tolist())})
    return 0
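convert_ymir_to_yolov5 only writes a data.yaml that points yolov5 at the ymir index files. A sketch of the kind of file it produces; the paths and class names here are made up, the real values come from cfg.ymir.input and cfg.param:

```python
import yaml

# hypothetical ymir paths and classes, standing in for cfg.ymir.input / cfg.param
data = dict(path='/in',
            train='/in/train-index.tsv',
            val='/in/val-index.tsv',
            test='/in/candidate-index.tsv',
            nc=2,
            names=['cat', 'dog'])

print(yaml.safe_dump(data))  # what convert_ymir_to_yolov5 writes to <out_dir>/data.yaml
```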
From 4991ee7b61a9540ff097c2c91a2933a0abed8703 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 8 Jun 2022 19:08:52 +0800
Subject: [PATCH 002/204] update docker files

---
 det-yolov5-tmi/cuda102.dockerfile       | 12 ++----------
 det-yolov5-tmi/cuda111-devel.dockerfile | 17 ++++-------------
 det-yolov5-tmi/cuda111.dockerfile       | 15 +++------------
 3 files changed, 9 insertions(+), 35 deletions(-)

diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile
index 3c359ee..eeaf599 100644
--- a/det-yolov5-tmi/cuda102.dockerfile
+++ b/det-yolov5-tmi/cuda102.dockerfile
@@ -11,22 +11,14 @@ ENV LANG=C.UTF-8
 
 # Install linux package
 RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \
-    libgl1-mesa-glx ffmpeg build-essential curl wget zip \
+    libgl1-mesa-glx curl wget zip \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# Install python package
-RUN pip install -U pip && \
-    pip install cython xtcocotools onnx onnx-simplifier loguru \
-    tensorboard==2.5.0 numba progress yacs pthflops imagesize pydantic pytest \
-    scipy pyyaml opencv-python thop pandas seaborn
-
-# Install ymir-exc sdk
-RUN pip install ymir-exc
-
 # Copy file from host to docker
 ADD ./det-yolov5-tmi /app
 RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
+RUN pip install ymir-exc && pip install -r /app/requirements.txt
 
 # Download pretrained weight and font file
 RUN cd /app && bash data/scripts/download_weights.sh
diff --git a/det-yolov5-tmi/cuda111-devel.dockerfile b/det-yolov5-tmi/cuda111-devel.dockerfile
index 6378b8b..77389b9 100644
--- a/det-yolov5-tmi/cuda111-devel.dockerfile
+++ b/det-yolov5-tmi/cuda111-devel.dockerfile
@@ -12,31 +12,22 @@ ENV LANG=C.UTF-8
 
 # Install linux package
 RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \
-    apt-get update && apt-get install -y gnupg2 git ninja-build libglib2.0-0 libsm6 \
-    libxrender-dev libxext6 libgl1-mesa-glx ffmpeg sudo openssh-server \
-    libyaml-dev vim tmux tree curl wget zip \
+    apt-get update && apt-get install -y gnupg2 git libglib2.0-0 libgl1-mesa-glx \
+    curl wget zip \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# Install python package
-RUN pip install -U pip && \
-    pip install cython xtcocotools jupyter onnx onnx-simplifier loguru \
-    tensorboard==2.5.0 numba progress yacs pthflops pytest \
-    scipy pydantic pyyaml imagesize opencv-python thop pandas seaborn
-
-# Install ymir-exc sdk
-RUN pip install ymir-exc
-
 # Copy file from host to docker
 ADD ./det-yolov5-tmi /app
 RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
+RUN pip install ymir-exc && pip install -r /app/requirements.txt
 
 # Download pretrained weight and font file
 RUN cd /app && bash data/scripts/download_weights.sh
 RUN mkdir -p /root/.config/Ultralytics && \
     wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf
 
-# setup PYTHONPATH to find local package
+# Make PYTHONPATH find local package
 ENV PYTHONPATH=.
 
 WORKDIR /app
diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile
index 4b637ec..9c8c061 100644
--- a/det-yolov5-tmi/cuda111.dockerfile
+++ b/det-yolov5-tmi/cuda111.dockerfile
@@ -11,24 +11,15 @@ ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
 ENV LANG=C.UTF-8
 
 # Install linux package
-RUN apt-get update && apt-get install -y gnupg2 git ninja-build libglib2.0-0 libsm6 \
-    libxrender-dev libxext6 libgl1-mesa-glx ffmpeg sudo openssh-server \
-    libyaml-dev vim tmux tree curl wget zip \
+RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \
+    libgl1-mesa-glx curl wget zip \
     && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
 
-# Install python package
-RUN pip install -U pip && \
-    pip install cython xtcocotools onnx onnx-simplifier loguru \
-    tensorboard==2.5.0 numba progress yacs pthflops imagesize pydantic pytest \
-    scipy pyyaml opencv-python thop pandas seaborn
-
-# Install ymir-exc sdk
-RUN pip install ymir-exc
-
 # Copy file from host to docker
 ADD ./det-yolov5-tmi /app
 RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
+RUN pip install ymir-exc && pip install -r /app/requirements.txt
 
 # Download pretrained weight and font file
 RUN cd /app && bash data/scripts/download_weights.sh

From 9d16def7a12f5bdebf63263b7136a660c8150f20 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 10 Jun 2022 15:24:56 +0800
Subject: [PATCH 003/204] add many files

---
 det-mmdetection-tmi/README_ymir.md           |   8 +
 .../mmdet/core/evaluation/eval_hooks.py      |  25 ++-
 .../mmdet/datasets/__init__.py               |   3 +-
 det-mmdetection-tmi/mmdet/datasets/coco.py   |  11 +-
 det-mmdetection-tmi/mmdet/datasets/ymir.py   | 201 ++++++++++++++++++
 det-mmdetection-tmi/mmdet/utils/util_ymir.py | 149 +++++++++++++
 det-mmdetection-tmi/start.py                 |  95 +++++++++
 det-mmdetection-tmi/ymir_log.py              |  53 +++++
 det-mmdetection-tmi/ymir_train.py            | 121 +++++++++++
 9 files changed, 663 insertions(+), 3 deletions(-)
 create mode 100644 det-mmdetection-tmi/README_ymir.md
 create mode 100644 det-mmdetection-tmi/mmdet/datasets/ymir.py
 create mode 100644 det-mmdetection-tmi/mmdet/utils/util_ymir.py
 create mode 100644 det-mmdetection-tmi/start.py
 create mode 100644 det-mmdetection-tmi/ymir_log.py
 create mode 100644 det-mmdetection-tmi/ymir_train.py

diff --git a/det-mmdetection-tmi/README_ymir.md b/det-mmdetection-tmi/README_ymir.md
new file mode 100644
index 0000000..de86768
--- /dev/null
+++ b/det-mmdetection-tmi/README_ymir.md
@@ -0,0 +1,8 @@
+# det-mmdetection-tmi
+
+`mmdetection` framework for object `det`ection `t`raining/`m`ining/`i`nfer tasks
+
+# changelog
+- modify `mmdet/datasets/coco.py`: save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` in json format
+- modify `mmdet/core/evaluation/eval_hooks.py`: write the training result file and monitor task progress
+- modify `mmdet/datasets/__init__.py` and add `mmdet/datasets/ymir.py`: add class `YmirDataset` to load the YMIR dataset.
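For context on how the new dataset class is meant to be wired in, a hypothetical mmdetection config fragment using `YmirDataset` (the paths follow the ymir layout described in ymir.py's docstring below; pipelines and the rest of the config are elided, so treat this purely as a sketch):

```python
# hypothetical config fragment; classes, pipelines and the remaining
# mmdetection config are omitted
data = dict(
    train=dict(
        type='YmirDataset',
        data_root='/in',
        img_prefix='assets',
        ann_prefix='annotations',
        ann_file='/in/train-index.tsv',
        pipeline=[]),
    val=dict(
        type='YmirDataset',
        data_root='/in',
        img_prefix='assets',
        ann_prefix='annotations',
        ann_file='/in/val-index.tsv',
        pipeline=[]))
```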
WIDERFaceDataset from .xml_style import XMLDataset +from .ymir import YmirDataset __all__ = [ 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', @@ -24,5 +25,5 @@ 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', 'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset', - 'OpenImagesDataset', 'OpenImagesChallengeDataset' + 'OpenImagesDataset', 'OpenImagesChallengeDataset', 'YmirDataset' ] diff --git a/det-mmdetection-tmi/mmdet/datasets/coco.py b/det-mmdetection-tmi/mmdet/datasets/coco.py index efd6949..cde2de7 100644 --- a/det-mmdetection-tmi/mmdet/datasets/coco.py +++ b/det-mmdetection-tmi/mmdet/datasets/coco.py @@ -3,6 +3,7 @@ import io import itertools import logging +import os import os.path as osp import tempfile import warnings @@ -12,7 +13,6 @@ import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable - from mmdet.core import eval_recalls from .api_wrappers import COCO, COCOeval from .builder import DATASETS @@ -562,6 +562,15 @@ def evaluate(self, results_per_category.append( (f'{nm["name"]}', f'{float(ap):0.3f}')) + + COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') + if COCO_EVAL_TMP_FILE is not None: + mmcv.dump({name:value for name,value in results_per_category}, COCO_EVAL_TMP_FILE, file_format='json') + else: + raise Exception('please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') + + print_log(f'\n write eval result to {COCO_EVAL_TMP_FILE}', logger=logger) + num_columns = min(6, len(results_per_category) * 2) results_flatten = list( itertools.chain(*results_per_category)) diff --git a/det-mmdetection-tmi/mmdet/datasets/ymir.py b/det-mmdetection-tmi/mmdet/datasets/ymir.py new file mode 100644 index 0000000..5cbbbfa --- /dev/null +++ b/det-mmdetection-tmi/mmdet/datasets/ymir.py @@ -0,0 +1,201 @@ +# Copyright (c) OpenMMLab voc.py. All rights reserved. +# wangjiaxin 2022-04-25 + +from collections import OrderedDict +import os.path as osp + +# from PIL import Image +import imagesize + +import json +from .builder import DATASETS +from .api_wrappers import COCO +from .coco import CocoDataset + +@DATASETS.register_module() +class YmirDataset(CocoDataset): + """ + converted dataset by ymir system 1.0.0 + /in/assets: image files directory + /in/annotations: annotation files directory + /in/train-index.tsv: image_file \t annotation_file + /in/val-index.tsv: image_file \t annotation_file + """ + def __init__(self, + min_size=0, + ann_prefix='annotations', + **kwargs): + self.min_size=min_size + self.ann_prefix=ann_prefix + super(YmirDataset, self).__init__(**kwargs) + + def load_annotations(self, ann_file): + """Load annotation from TXT style ann_file. + + Args: + ann_file (str): Path of TXT file. + + Returns: + list[dict]: Annotation info from TXT file. 
+ """ + + images = [] + categories = [] + # category_id is from 1 for coco, not 0 + for i, name in enumerate(self.CLASSES): + categories.append({'supercategory':'none', + 'id': i+1, + 'name': name}) + + annotations = [] + instance_counter = 1 + image_counter = 1 + + with open(ann_file,'r') as fp: + lines=fp.readlines() + + for line in lines: + # split any white space + img_path, ann_path = line.strip().split() + img_path = osp.join(self.data_root, self.img_prefix, img_path) + ann_path = osp.join(self.data_root, self.ann_prefix, ann_path) + # img = Image.open(img_path) + # width, height = img.size + width, height = imagesize.get(img_path) + images.append( + dict(id=image_counter, + file_name=img_path, + ann_path=ann_path, + width=width, + height=height)) + + try: + anns = self.get_txt_ann_info(ann_path) + except Exception as e: + print(f'bad annotation for {ann_path} with {e}') + anns = [] + + for ann in anns: + ann['image_id']=image_counter + ann['id']=instance_counter + annotations.append(ann) + instance_counter+=1 + + image_counter+=1 + + ### pycocotool coco init + self.coco = COCO() + self.coco.dataset['type']='instances' + self.coco.dataset['categories']=categories + self.coco.dataset['images']=images + self.coco.dataset['annotations']=annotations + self.coco.createIndex() + + ### mmdetection coco init + # avoid the filter problem in CocoDataset, view coco_api.py for detail + self.coco.img_ann_map = self.coco.imgToAnns + self.coco.cat_img_map = self.coco.catToImgs + + # get valid category_id (in annotation, start from 1, arbitary) + self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) + # convert category_id to label(train_id, start from 0) + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.img_ids = self.coco.get_img_ids() + # self.img_ids = list(self.coco.imgs.keys()) + assert len(self.img_ids) > 0, 'image number must > 0' + N=len(self.img_ids) + print(f'load {N} image from YMIR dataset') + + data_infos = [] + total_ann_ids = [] + for i in self.img_ids: + info = self.coco.load_imgs([i])[0] + info['filename'] = info['file_name'] + data_infos.append(info) + ann_ids = self.coco.get_ann_ids(img_ids=[i]) + total_ann_ids.extend(ann_ids) + assert len(set(total_ann_ids)) == len( + total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" + return data_infos + + def dump(self, ann_file): + with open(ann_file,'w') as fp: + json.dump(self.coco.dataset, fp) + + def get_ann_path_from_img_path(self,img_path): + img_id=osp.splitext(osp.basename(img_path))[0] + return osp.join(self.data_root, self.ann_prefix, img_id+'.txt') + + def get_txt_ann_info(self, txt_path): + """Get annotation from TXT file by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. 
+ """ + + # img_id = self.data_infos[idx]['id'] + # txt_path = osp.splitext(img_path)[0]+'.txt' + # txt_path = self.get_ann_path_from_img_path(img_path) + anns = [] + if osp.exists(txt_path): + with open(txt_path,'r') as fp: + lines=fp.readlines() + else: + lines=[] + for line in lines: + obj=[int(x) for x in line.strip().split(',')] + # YMIR category id starts from 0, coco from 1 + category_id, xmin, ymin, xmax, ymax = obj + bbox = [xmin, ymin, xmax, ymax] + h,w=ymax-ymin,xmax-xmin + ignore = 0 + if self.min_size: + assert not self.test_mode + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + if w < self.min_size or h < self.min_size: + ignore = 1 + + ann = dict( + segmentation=[[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]], + area=w*h, + iscrowd=0, + image_id=None, + bbox=[xmin, ymin, w, h], + category_id=category_id+1, # category id is from 1 for coco + id=None, + ignore=ignore + ) + anns.append(ann) + return anns + + def get_cat_ids(self, idx): + """Get category ids in TXT file by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + + cat_ids = [] + # img_path = self.data_infos[idx]['file_name'] + # txt_path = self.get_ann_path_from_img_path(img_path) + txt_path = self.data_infos[idx]['ann_path'] + txt_path = osp.join(self.data_root, self.ann_prefix, txt_path) + if osp.exists(txt_path): + with open(txt_path,'r') as fp: + lines = fp.readlines() + else: + lines = [] + + for line in lines: + obj = [int(x) for x in line.strip().split(',')] + # label, xmin, ymin, xmax, ymax = obj + cat_ids.append(obj[0]) + + return cat_ids diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py new file mode 100644 index 0000000..3b5008b --- /dev/null +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -0,0 +1,149 @@ +""" +utils function for ymir and yolov5 +""" +import glob +import os +import os.path as osp +import sys +from enum import IntEnum +from typing import Any, List, Tuple +from urllib.parse import urlparse + +import mmcv +from easydict import EasyDict as edict +from nptyping import NDArray, Shape, UInt8 +from torch.hub import HASH_REGEX, _get_torch_home, download_url_to_file +from ymir_exc import env +from ymir_exc import result_writer as rw + + +class YmirStage(IntEnum): + PREPROCESS = 1 # convert dataset + TASK = 2 # training/mining/infer + POSTPROCESS = 3 # export model + + +BBOX = NDArray[Shape['*,4'], Any] +CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] + + +def get_ymir_process(stage: YmirStage, p: float = 0.0) -> float: + # const value for ymir process + PREPROCESS_PERCENT = 0.1 + TASK_PERCENT = 0.8 + POSTPROCESS_PERCENT = 0.1 + + if p < 0 or p > 1.0: + raise Exception(f'p not in [0,1], p={p}') + + if stage == YmirStage.PREPROCESS: + return PREPROCESS_PERCENT * p + elif stage == YmirStage.TASK: + return PREPROCESS_PERCENT + TASK_PERCENT * p + elif stage == YmirStage.POSTPROCESS: + return PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p + else: + raise NotImplementedError(f'unknown stage {stage}') + + +def get_merged_config() -> edict: + """ + merge ymir_config and executor_config + """ + merged_cfg = edict() + # the hyperparameter information + merged_cfg.param = env.get_executor_config() + + # the ymir path information + merged_cfg.ymir = env.get_current_env() + return merged_cfg + + +def get_weight_file(cfg: edict) -> str: + """ + return the weight file path by priority + find weight file in cfg.param.model_params_path or cfg.param.model_params_path + """ + 
+    if cfg.ymir.run_training:
+        model_params_path = cfg.param.pretrained_model_paths
+    else:
+        model_params_path = cfg.param.model_params_path
+
+    model_dir = osp.join(cfg.ymir.input.root_dir,
+                         cfg.ymir.input.models_dir)
+    model_params_path = [
+        p for p in model_params_path if osp.exists(osp.join(model_dir, p))]
+
+    # choose weight file by priority, best.pt > xxx.pt
+    if 'best.pt' in model_params_path:
+        return osp.join(model_dir, 'best.pt')
+    else:
+        for f in model_params_path:
+            if f.endswith('.pt'):
+                return osp.join(model_dir, f)
+
+    return ""
+
+
+def download_weight_file(model: str) -> str:
+    """
+    download the weight file from the web if it is not cached locally.
+    """
+    model_to_url = dict(
+        faster_rcnn_r50_fpn='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
+        faster_rcnn_r101_fpn='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth',
+        yolox_tiny='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth',
+        yolox_s='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth',
+        yolox_l='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth',
+        yolox_x='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth',
+        yolox_nano='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth'  # NOTE: reuses the yolox_tiny checkpoint
+    )
+
+    url = model_to_url[model]
+    torch_home = _get_torch_home()
+    model_dir = os.path.join(torch_home, 'checkpoints')
+
+    os.makedirs(model_dir, exist_ok=True)
+    parts = urlparse(url)
+    filename = os.path.basename(parts.path)
+    cached_file = os.path.join(model_dir, filename)
+
+    if not os.path.exists(cached_file):
+        sys.stderr.write('Downloading: "{}" to {}\n'.format(
+            url, cached_file))
+        r = HASH_REGEX.search(filename)  # r is Optional[Match[str]]
+        hash_prefix = r.group(1) if r else None
+        download_url_to_file(
+            url, cached_file, hash_prefix, progress=True)
+
+    return cached_file
+
+
+def update_training_result_file(key_score):
+    COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE')
+    if COCO_EVAL_TMP_FILE is None:
+        raise Exception(
+            'please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file')
+
+    results_per_category = mmcv.load(COCO_EVAL_TMP_FILE)
+
+    work_dir = os.getenv('YMIR_MODELS_DIR')
+    if work_dir is None or not osp.isdir(work_dir):
+        raise Exception(
+            f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {work_dir}')
+
+    # assert only one model config file in work_dir
+    model_config_file = glob.glob(osp.join(work_dir, '*.py'))[0]
+    weight_files = glob.glob(osp.join(work_dir, 'best_bbox_mAP_epoch_*.pth'))
+    if len(weight_files) == 0:
+        weight_files = glob.glob(osp.join(work_dir, 'epoch_*.pth'))
+
+    if len(weight_files) == 0:
+        raise Exception(f'no weight file found in {work_dir}')
+
+    # sort the weight files by time, use the latest file.
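+    # (assumption: training writes checkpoints sequentially, so the most
+    # recently modified 'epoch_*.pth' corresponds to the latest epoch)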
+ weight_files.sort(key=lambda fn: osp.getmtime(fn)) + model_weight_file = osp.basename(weight_files[-1]) + rw.write_training_result(model_names=[model_weight_file, osp.basename(model_config_file)], + mAP=key_score, + classAPs=results_per_category) \ No newline at end of file diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py new file mode 100644 index 0000000..16e8e89 --- /dev/null +++ b/det-mmdetection-tmi/start.py @@ -0,0 +1,95 @@ +import logging +import os +import os.path as osp +import shutil +import subprocess +import sys + +import cv2 +from easydict import EasyDict as edict +from ymir_exc import dataset_reader as dr +from ymir_exc import env, monitor +from ymir_exc import result_writer as rw + +from mmdet.utils.util_ymir import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, download_weight_file, get_merged_config, + get_weight_file, get_ymir_process) + + +def start() -> int: + cfg = get_merged_config() + + logging.info(f'merged config: {cfg}') + + if cfg.ymir.run_training: + _run_training(cfg) + elif cfg.ymir.run_mining: + _run_mining(cfg) + elif cfg.ymir.run_infer: + _run_infer(cfg) + else: + logging.warning('no task running') + + return 0 + + +def _run_training(cfg: edict) -> None: + """ + function for training task + 1. convert dataset + 2. training model + 3. save model weight/hyperparameter/... to design directory + """ + command = 'python3 ymir_train.py' + logging.info(f'start training: {command}') + subprocess.run(command.split(), check=True) + # if task done, write 100% percent log + monitor.write_monitor_logger(percent=1.0) + + +def _run_mining(cfg: edict()) -> None: + logging.info('convert ymir dataset to yolov5 dataset') + out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset') + convert_ymir_to_yolov5(cfg, out_dir) + monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + + command = 'python3 mining/mining_cald.py' + logging.info(f'mining: {command}') + subprocess.run(command.split(), check=True) + monitor.write_monitor_logger(percent=1.0) + + +def _run_infer(cfg: edict) -> None: + # generate data.yaml for infer + logging.info('convert ymir dataset to yolov5 dataset') + out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset') + convert_ymir_to_yolov5(cfg, out_dir) + monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + + N = dr.items_count(env.DatasetType.CANDIDATE) + infer_result = dict() + model = YmirYolov5(cfg) + idx = -1 + + monitor_gap = max(1, N // 100) + for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): + img = cv2.imread(asset_path) + result = model.infer(img) + infer_result[asset_path] = result + idx += 1 + + if idx % monitor_gap == 0: + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) + monitor.write_monitor_logger(percent=percent) + + rw.write_infer_result(infer_result=infer_result) + monitor.write_monitor_logger(percent=1.0) + + +if __name__ == '__main__': + logging.basicConfig(stream=sys.stdout, + format='%(levelname)-8s: [%(asctime)s] %(message)s', + datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) + + os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') + sys.exit(start()) diff --git a/det-mmdetection-tmi/ymir_log.py b/det-mmdetection-tmi/ymir_log.py new file mode 100644 index 0000000..29f2ec8 --- /dev/null +++ b/det-mmdetection-tmi/ymir_log.py @@ -0,0 +1,53 @@ +import time +import os.path as osp +from typing import Generator +from pygtail import Pygtail +from mmcv.util import 
TORCH_VERSION, digit_version
+
+if (TORCH_VERSION == 'parrots'
+        or digit_version(TORCH_VERSION) < digit_version('1.1')):
+    try:
+        from tensorboardX import SummaryWriter
+    except ImportError:
+        raise ImportError('Please install tensorboardX to use '
+                          'TensorboardLoggerHook.')
+else:
+    try:
+        from torch.utils.tensorboard import SummaryWriter
+    except ImportError:
+        raise ImportError(
+            'Please run "pip install future tensorboard" to install '
+            'the dependencies to use torch.utils.tensorboard '
+            '(applicable to PyTorch 1.1 or higher)')
+
+
+def read_log(f: str, wait: bool = True, sleep: float = 0.1) -> Generator[str, None, None]:
+    """
+    Basically tail -f with a configurable sleep
+    """
+    with open(f) as logfile:
+        # logfile.seek(0, os.SEEK_END)
+        while True:
+            new_line = logfile.readline()
+            if new_line:
+                yield new_line
+            else:
+                if wait:
+                    # wait for a new line to be appended
+                    time.sleep(sleep)
+                else:
+                    # read all lines in the file, then stop
+                    break
+
+# module-level step counter shared across calls of write_tensorboard_text
+_TENSORBOARD_GLOBAL_STEP = 0
+
+def write_tensorboard_text(tb_log_file: str, executor_log_file: str) -> None:
+    global _TENSORBOARD_GLOBAL_STEP
+    # tb_log_file = osp.join(cfg.ymir.output.tensorboard_dir, 'tensorboard_text.log')
+    # executor_log_file = cfg.ymir.output.executor_log_file
+    writer = SummaryWriter(tb_log_file)
+
+    # Pygtail always returns only the new lines
+    for line in Pygtail(executor_log_file):
+        writer.add_text(tag='ymir-executor', text_string=line, global_step=_TENSORBOARD_GLOBAL_STEP)
+        _TENSORBOARD_GLOBAL_STEP += 1
+
+    writer.close()
\ No newline at end of file
diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py
new file mode 100644
index 0000000..205bbc7
--- /dev/null
+++ b/det-mmdetection-tmi/ymir_train.py
@@ -0,0 +1,121 @@
+import glob
+import logging
+import os
+import os.path as osp
+import subprocess
+import sys
+
+from easydict import EasyDict as edict
+from ymir_exc import monitor
+from mmdet.utils.util_ymir import get_merged_config, get_weight_file, download_weight_file, get_ymir_process, YmirStage, update_training_result_file
+
+
+def main(cfg: edict) -> int:
+    # default ymir config
+    gpu_id = cfg.param.get("gpu_id", '0')
+    num_gpus = len(gpu_id.split(","))
+    if num_gpus == 0:
+        raise Exception(f'gpu_id = {gpu_id} is not valid, eg: 0 or 2,4')
+
+    classes = cfg.param.class_names
+    num_classes = len(classes)
+    model = cfg.param.model
+    if num_classes == 0:
+        raise Exception('class_names not found in config!')
+
+    weight_file = get_weight_file(cfg)
+    if not weight_file:
+        weight_file = download_weight_file(model)
+
+    # user define config
+    learning_rate = cfg.param.learning_rate
+    epochs = cfg.param.epochs
+
+    samples_per_gpu = cfg.param.samples_per_gpu
+    workers_per_gpu = min(4, max(1, samples_per_gpu//2))
+
+    supported_models = []
+    if model.startswith("faster_rcnn"):
+        files = glob.glob(
+            osp.join('configs/faster_rcnn/faster_rcnn_*_ymir_coco.py'))
+        supported_models = ['faster_rcnn_r50_fpn', 'faster_rcnn_r101_fpn']
+    elif model.startswith("yolox"):
+        files = glob.glob(osp.join('configs/yolox/yolox_*_8x8_300e_ymir_coco.py'))
+        supported_models = ['yolox_nano', 'yolox_tiny',
+                            'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']
+    else:
+        files = glob.glob(osp.join('configs/*/*_ymir_coco.py'))
+        supported_models = [osp.basename(f) for f in files]
+
+    assert model in supported_models, f'unknown model {model}, not in {supported_models}'
+
+    # modify base config file
+    base_config_file = './configs/_base_/datasets/ymir_coco.py'
+
+    modify_dict = dict(
+        classes=classes,
+        num_classes=num_classes,
+        max_epochs=epochs,
+        lr=learning_rate,
+        samples_per_gpu=samples_per_gpu,
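+        # NOTE: each key in modify_dict is substituted into ymir_coco.py by the
+        # rewrite loop below, which matches lines starting with "<key> =";
+        # keys with no matching top-level assignment are silently skipped.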
+ workers_per_gpu=workers_per_gpu, + data_root=cfg.ymir.input.root_dir, + img_prefix=cfg.ymir.input.assets_dir, + ann_prefix=cfg.ymir.input.annotations_dir, + train_ann_file=cfg.ymir.input.training_index_file, + val_ann_file=cfg.ymir.input.val_index_file, + tensorboard_dir=cfg.ymir.output.tensorboard_dir, + work_dir=cfg.ymir.output.models_dir, + checkpoints_path=weight_file + ) + + logging.info(f'modified config is {modify_dict}') + with open(base_config_file, 'r') as fp: + lines = fp.readlines() + + fw = open(base_config_file, 'w') + for line in lines: + for key in modify_dict: + if line.startswith((f"{key}=", f"{key} =")): + value = modify_dict[key] + if isinstance(value, str): + line = f"{key} = '{value}' \n" + else: + line = f"{key} = {value} \n" + break + fw.write(line) + fw.close() + + # train_config_file will use the config in base_config_file + train_config_file = '' + for f in files: + if osp.basename(f).startswith(model): + train_config_file = f + + monitor.write_monitor_logger(percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) + + work_dir = cfg.ymir.output.models_dir + if num_gpus == 1: + cmd = f"python tools/train.py {train_config_file} " + \ + f"--work-dir {work_dir} --gpu-id {gpu_id}" + else: + os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id + cmd = f"./tools/dist_train.sh {train_config_file} {num_gpus} " + \ + f"--work-dir {work_dir}" + + logging.info(f"training command: {cmd}") + subprocess.run(cmd.split(), check=True) + + # eval_hooks will generate training_result_file if current map is best. + # create a fake map = 0 if no training_result_file generate in eval_hooks + if not osp.exists(cfg.ymir.output.training_result_file): + update_training_result_file(0) + + return 0 + +if __name__ == '__main__': + cfg = get_merged_config() + os.environ.setdefault('YMIR_MODELS_DIR','') + os.environ.setdefault('COCO_EVAL_TMP_FILE', '') + os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') + sys.exit(main(cfg)) From 7d8d091a0c1ddd018b948f19a63612ce902d675f Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 10 Jun 2022 15:28:58 +0800 Subject: [PATCH 004/204] clone code to /workspace/app instead of /app --- live-code-executor/ymir_start.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py index bd1ae27..96680dc 100644 --- a/live-code-executor/ymir_start.py +++ b/live-code-executor/ymir_start.py @@ -15,7 +15,7 @@ def show_ymir_info(executor_config: dict) -> None: def main(): - # step 1. read config.yaml and clone git_url:git_branch to /app + # step 1. read config.yaml and clone git_url:git_branch to /workspace/app executor_config = env.get_executor_config() show_ymir_info(executor_config) @@ -23,14 +23,14 @@ def main(): git_branch = executor_config.get('git_branch', '') if not git_branch: - cmd = f'git clone {git_url} /app' + cmd = f'git clone {git_url} /workspace/app' else: - cmd = f'git clone {git_url} -b {git_branch} /app' + cmd = f'git clone {git_url} -b {git_branch} /workspace/app' logger.info(f'clone code: {cmd}') subprocess.check_output(cmd.split()) - # step 2. read /app/extra-requirements.txt and install it. - pypi_file = '/app/extra-requirements.txt' + # step 2. read /workspace/app/extra-requirements.txt and install it. + pypi_file = '/workspace/app/extra-requirements.txt' if osp.exists(pypi_file): pypi_mirror = executor_config.get('pypi_mirror', '') @@ -42,10 +42,10 @@ def main(): else: logger.info('no python package needs to install') - # step 3. 
run /app/start.py + # step 3. run /workspace/app/start.py cmd = 'python3 start.py' logger.info(f'run task: {cmd}') - subprocess.check_output(cmd.split(), cwd='/app') + subprocess.check_output(cmd.split(), cwd='/workspace/app') logger.info('live code executor run successfully') return 0 From a79deba854ff25c2945eab0962a37dea5eaaf27e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 10 Jun 2022 17:44:12 +0800 Subject: [PATCH 005/204] update for non-root user, git clone to /workspace/app instead of /app --- live-code-executor/mxnet.dockerfile | 8 ++++++++ live-code-executor/torch.dockerfile | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index 7056b80..f66d60a 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -4,6 +4,9 @@ ARG BUILD="runtime" # runtime/devel ARG SYSTEM="ubuntu20.04" FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-${BUILD}-${SYSTEM} +ARG USER_GID=1000 +ARG USER_UID=1000 +ARG USER=ymir ARG MXNET="1.9.1" ENV LANG=C.UTF-8 @@ -29,4 +32,9 @@ COPY ymir_start.py /workspace/ymir_start.py # set up python path ENV PYTHONPATH=. +# Create non-root user and chown /workspace +RUN groupadd --gid $USER_GID $USER \ + && useradd --uid $USER_UID --gid $USER_GID -m $USER --create-home \ + && chown ${USER_GID}:${USER_GID} /workspace + CMD bash /usr/bin/start.sh diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index e2c606d..cd848ab 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -4,6 +4,9 @@ ARG CUDNN="8" # cuda11.1 + pytorch 1.9.0 not work!!! FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +ARG USER_GID=1000 +ARG USER_UID=1000 +ARG USER=ymir ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" @@ -34,4 +37,9 @@ COPY ymir_start.py /workspace/ymir_start.py # set up python path ENV PYTHONPATH=. +# Create non-root user and chown /workspace +RUN groupadd --gid $USER_GID $USER \ + && useradd --uid $USER_UID --gid $USER_GID -m $USER --create-home \ + && chown ${USER_GID}:${USER_GID} /workspace + CMD bash /usr/bin/start.sh From 26b18106e5afa31bdbae56d13c2b20a3c1fdd301 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 10 Jun 2022 18:41:36 +0800 Subject: [PATCH 006/204] output the subprocess to main process directly --- live-code-executor/ymir_start.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py index 96680dc..e807bd4 100644 --- a/live-code-executor/ymir_start.py +++ b/live-code-executor/ymir_start.py @@ -27,7 +27,7 @@ def main(): else: cmd = f'git clone {git_url} -b {git_branch} /workspace/app' logger.info(f'clone code: {cmd}') - subprocess.check_output(cmd.split()) + subprocess.run(cmd.split(), check=True) # step 2. read /workspace/app/extra-requirements.txt and install it. pypi_file = '/workspace/app/extra-requirements.txt' @@ -38,14 +38,14 @@ def main(): cmd += ' -i {pypi_mirror}' if pypi_mirror else '' logger.info(f'install python package: {cmd}') - subprocess.check_output(cmd.split()) + subprocess.run(cmd.split(), check=True) else: logger.info('no python package needs to install') # step 3. 
run /workspace/app/start.py cmd = 'python3 start.py' logger.info(f'run task: {cmd}') - subprocess.check_output(cmd.split(), cwd='/workspace/app') + subprocess.run(cmd.split(), check=True, cwd='/workspace/app') logger.info('live code executor run successfully') return 0 From 9479327ecf67dc0d8f0ef717d845f7d2bb32cbdb Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 13 Jun 2022 14:37:07 +0800 Subject: [PATCH 007/204] sigmoid --> hardswish --- det-yolov5-tmi/models/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py index 0dae024..5dda9ce 100644 --- a/det-yolov5-tmi/models/common.py +++ b/det-yolov5-tmi/models/common.py @@ -26,7 +26,7 @@ make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, time_sync - +from utils.activations import SiLU, Hardswish def autopad(k, p=None): # kernel, padding # Pad to 'same' @@ -41,7 +41,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + self.act = Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) def forward(self, x): return self.act(self.bn(self.conv(x))) From db6abd3118bd942f2c3cb898a42a67210bb91ef6 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 15 Jun 2022 11:52:25 +0800 Subject: [PATCH 008/204] fix mining and infer bug --- det-yolov5-tmi/mining/mining_cald.py | 5 +++-- det-yolov5-tmi/models/common.py | 2 +- det-yolov5-tmi/start.py | 17 +++++++++-------- det-yolov5-tmi/utils/ymir_yolov5.py | 15 ++++++--------- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py index 77bfcf6..d93fb43 100644 --- a/det-yolov5-tmi/mining/mining_cald.py +++ b/det-yolov5-tmi/mining/mining_cald.py @@ -15,7 +15,7 @@ from ymir_exc import result_writer as rw from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate -from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5, YmirStage, get_ymir_process +from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5, YmirStage, get_ymir_process, get_merged_config def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: @@ -133,7 +133,8 @@ def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray: def main(): - miner = MiningCald() + cfg = get_merged_config() + miner = MiningCald(cfg) mining_result = miner.mining() rw.write_mining_result(mining_result=mining_result) diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py index 5dda9ce..a2ec35a 100644 --- a/det-yolov5-tmi/models/common.py +++ b/det-yolov5-tmi/models/common.py @@ -41,7 +41,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + self.act = nn.Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) def forward(self, x): return self.act(self.bn(self.conv(x))) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index d22b3b8..ba06400 100644 --- 
a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -40,9 +40,9 @@ def _run_training(cfg: edict) -> None: 3. save model weight/hyperparameter/... to design directory """ # 1. convert dataset - logging.info('convert ymir dataset to yolov5 dataset') out_dir = cfg.ymir.output.root_dir - convert_ymir_to_yolov5(cfg, out_dir) + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) # 2. training model @@ -80,9 +80,10 @@ def _run_training(cfg: edict) -> None: def _run_mining(cfg: edict()) -> None: - logging.info('convert ymir dataset to yolov5 dataset') - out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset') - convert_ymir_to_yolov5(cfg, out_dir) + # generate data.yaml for mining + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) command = 'python3 mining/mining_cald.py' @@ -93,9 +94,9 @@ def _run_mining(cfg: edict()) -> None: def _run_infer(cfg: edict) -> None: # generate data.yaml for infer - logging.info('convert ymir dataset to yolov5 dataset') - out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset') - convert_ymir_to_yolov5(cfg, out_dir) + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) N = dr.items_count(env.DatasetType.CANDIDATE) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 64ce9be..9010340 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -61,13 +61,10 @@ def get_merged_config() -> edict: merged_cfg.ymir = env.get_current_env() return merged_cfg -def get_weight_file(cfg: edict, try_download: bool = True) -> str: +def get_weight_file(cfg: edict) -> str: """ return the weight file path by priority - - 1. find weight file in cfg.param.model_params_path or cfg.param.model_params_path - 2. if try_download and no weight file offered - for training task, yolov5 will download it from github. 
+ find weight file in cfg.param.model_params_path or cfg.param.model_params_path """ if cfg.ymir.run_training: model_params_path = cfg.param.pretrained_model_paths @@ -120,10 +117,11 @@ def __init__(self, cfg: edict): def init_detector(self, device: torch.device) -> DetectMultiBackend: weights = get_weight_file(self.cfg) + data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') model = DetectMultiBackend(weights=weights, device=device, dnn=False, # not use opencv dnn for onnx inference - data='data.yaml') # dataset.yaml path + data=data_yaml) # dataset.yaml path return model @@ -183,11 +181,10 @@ def infer(self, img: CV_IMAGE) -> List[rw.Annotation]: return anns -def convert_ymir_to_yolov5(cfg: edict, output_root_dir: str) -> None: +def convert_ymir_to_yolov5(cfg: edict) -> None: """ convert ymir format dataset to yolov5 format generate data.yaml for training/mining/infer - output_root_dir: the output root dir """ data = dict(path=cfg.ymir.input.root_dir, train=cfg.ymir.input.training_index_file, @@ -196,7 +193,7 @@ def convert_ymir_to_yolov5(cfg: edict, output_root_dir: str) -> None: nc=len(cfg.param.class_names), names=cfg.param.class_names) - with open(osp.join(output_root_dir, 'data.yaml'), 'w') as fw: + with open(osp.join(cfg.ymir.output.root_dir, 'data.yaml'), 'w') as fw: fw.write(yaml.safe_dump(data)) From 79c72a19bdbf7b05f85161f56346484edae6747d Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 15 Jun 2022 12:23:35 +0800 Subject: [PATCH 009/204] revert to /app --- live-code-executor/ymir_start.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py index e807bd4..71adf5c 100644 --- a/live-code-executor/ymir_start.py +++ b/live-code-executor/ymir_start.py @@ -15,7 +15,7 @@ def show_ymir_info(executor_config: dict) -> None: def main(): - # step 1. read config.yaml and clone git_url:git_branch to /workspace/app + # step 1. read config.yaml and clone git_url:git_branch to /app executor_config = env.get_executor_config() show_ymir_info(executor_config) @@ -23,14 +23,14 @@ def main(): git_branch = executor_config.get('git_branch', '') if not git_branch: - cmd = f'git clone {git_url} /workspace/app' + cmd = f'git clone {git_url} /app' else: - cmd = f'git clone {git_url} -b {git_branch} /workspace/app' + cmd = f'git clone {git_url} -b {git_branch} /app' logger.info(f'clone code: {cmd}') subprocess.run(cmd.split(), check=True) - # step 2. read /workspace/app/extra-requirements.txt and install it. - pypi_file = '/workspace/app/extra-requirements.txt' + # step 2. read /app/extra-requirements.txt and install it. + pypi_file = '/app/extra-requirements.txt' if osp.exists(pypi_file): pypi_mirror = executor_config.get('pypi_mirror', '') @@ -42,10 +42,10 @@ def main(): else: logger.info('no python package needs to install') - # step 3. run /workspace/app/start.py + # step 3. run /app/start.py cmd = 'python3 start.py' logger.info(f'run task: {cmd}') - subprocess.run(cmd.split(), check=True, cwd='/workspace/app') + subprocess.run(cmd.split(), check=True, cwd='/app') logger.info('live code executor run successfully') return 0 From 3f40dac76621cb588ccbb917de5ef2b1772ad64c Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 15 Jun 2022 12:24:24 +0800 Subject: [PATCH 010/204] Revert "update for non-root user, git clone to /workspace/app instead of /app" This reverts commit a79deba854ff25c2945eab0962a37dea5eaaf27e. 
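After this pair of reverts (009/010), the live-code contract is back to its
original form: the user repository is cloned to /app and must provide a
top-level start.py, plus an optional extra-requirements.txt. A minimal
repository skeleton satisfying that contract could look like the sketch below
(illustrative only; apart from the start.py entry point and the ymir_exc calls
already used in this series, everything here is an assumption):

    # start.py -- invoked by the executor as `python3 start.py` with cwd=/app
    import sys

    from ymir_exc import env, monitor

    def main() -> int:
        executor_config = env.get_executor_config()  # hyper-parameters from config.yaml
        # ... task-specific training/mining/infer code goes here ...
        monitor.write_monitor_logger(percent=1.0)  # report completion to ymir
        return 0

    if __name__ == '__main__':
        sys.exit(main())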
--- live-code-executor/mxnet.dockerfile | 8 -------- live-code-executor/torch.dockerfile | 8 -------- 2 files changed, 16 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index 449bfc4..e55f478 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -4,9 +4,6 @@ ARG BUILD="runtime" # runtime/devel ARG SYSTEM="ubuntu20.04" FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-${BUILD}-${SYSTEM} -ARG USER_GID=1000 -ARG USER_UID=1000 -ARG USER=ymir ARG MXNET="1.9.1" ENV LANG=C.UTF-8 @@ -41,9 +38,4 @@ COPY ymir_start.py /workspace/ymir_start.py # set up python path ENV PYTHONPATH=. -# Create non-root user and chown /workspace -RUN groupadd --gid $USER_GID $USER \ - && useradd --uid $USER_UID --gid $USER_GID -m $USER --create-home \ - && chown ${USER_GID}:${USER_GID} /workspace - CMD bash /usr/bin/start.sh diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index c2e9486..66de371 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -4,9 +4,6 @@ ARG CUDNN="8" # cuda11.1 + pytorch 1.9.0 not work!!! FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime -ARG USER_GID=1000 -ARG USER_UID=1000 -ARG USER=ymir ARG SERVER_MODE=prod @@ -43,9 +40,4 @@ COPY ymir_start.py /workspace/ymir_start.py # set up python path ENV PYTHONPATH=. -# Create non-root user and chown /workspace -RUN groupadd --gid $USER_GID $USER \ - && useradd --uid $USER_UID --gid $USER_GID -m $USER --create-home \ - && chown ${USER_GID}:${USER_GID} /workspace - CMD bash /usr/bin/start.sh From 059ac5d87ec38a76cdce8af075bb3087772dd87f Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 15 Jun 2022 14:19:40 +0800 Subject: [PATCH 011/204] update yolov5 dockerfile --- det-yolov5-tmi/cuda102.dockerfile | 21 +++++++++++++++------ det-yolov5-tmi/cuda111-devel.dockerfile | 21 +++++++++++++++------ det-yolov5-tmi/cuda111.dockerfile | 21 +++++++++++++++------ det-yolov5-tmi/models/common.py | 2 +- det-yolov5-tmi/requirements.txt | 6 +++--- det-yolov5-tmi/utils/ymir_yolov5.py | 13 +++++++++---- 6 files changed, 58 insertions(+), 26 deletions(-) diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile index eeaf599..22f7b98 100644 --- a/det-yolov5-tmi/cuda102.dockerfile +++ b/det-yolov5-tmi/cuda102.dockerfile @@ -3,6 +3,7 @@ ARG CUDA="10.2" ARG CUDNN="7" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +ARG SERVER_MODE=prod ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" @@ -15,18 +16,26 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Copy file from host to docker +# install ymir-exc sdk +RUN if [ "${SERVER_MODE}" = "dev" ]; then \ + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + else \ + pip install ymir-exc; \ + fi + +# Copy file from host to docker and install requirements ADD ./det-yolov5-tmi /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ -RUN pip install ymir-exc && pip install -r /app/requirements.txt +RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ + && pip install -r /app/requirements.txt # Download pretrained weight and font file -RUN cd /app && bash data/scripts/download_weights.sh -RUN mkdir -p /root/.config/Ultralytics && \ - wget https://ultralytics.com/assets/Arial.ttf -O 
/root/.config/Ultralytics/Arial.ttf +RUN cd /app && bash data/scripts/download_weights.sh \ + && mkdir -p /root/.config/Ultralytics \ + && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf # Make PYTHONPATH find local package ENV PYTHONPATH=. WORKDIR /app +RUN echo "python3 /app/start.py" > /usr/bin/start.sh CMD python3 /app/start.py \ No newline at end of file diff --git a/det-yolov5-tmi/cuda111-devel.dockerfile b/det-yolov5-tmi/cuda111-devel.dockerfile index 77389b9..cd2eb03 100644 --- a/det-yolov5-tmi/cuda111-devel.dockerfile +++ b/det-yolov5-tmi/cuda111-devel.dockerfile @@ -4,6 +4,7 @@ ARG CUDNN="8" # cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!! FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel +ARG SERVER_MODE=prod ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" @@ -17,18 +18,26 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Copy file from host to docker +# install ymir-exc sdk +RUN if [ "${SERVER_MODE}" = "dev" ]; then \ + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + else \ + pip install ymir-exc; \ + fi + +# Copy file from host to docker and install requirements ADD ./det-yolov5-tmi /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ -RUN pip install ymir-exc && pip install -r /app/requirements.txt +RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ + && pip install -r /app/requirements.txt # Download pretrained weight and font file -RUN cd /app && bash data/scripts/download_weights.sh -RUN mkdir -p /root/.config/Ultralytics && \ - wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf +RUN cd /app && bash data/scripts/download_weights.sh \ + && mkdir -p /root/.config/Ultralytics \ + && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf # Make PYTHONPATH find local package ENV PYTHONPATH=. WORKDIR /app +RUN echo "python3 /app/start.py" > /usr/bin/start.sh CMD python3 /app/start.py \ No newline at end of file diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile index 9c8c061..db9b53b 100644 --- a/det-yolov5-tmi/cuda111.dockerfile +++ b/det-yolov5-tmi/cuda111.dockerfile @@ -4,6 +4,7 @@ ARG CUDNN="8" # cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!! 
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +ARG SERVER_MODE=prod ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" @@ -16,18 +17,26 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Copy file from host to docker +# install ymir-exc sdk +RUN if [ "${SERVER_MODE}" = "dev" ]; then \ + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + else \ + pip install ymir-exc; \ + fi + +# Copy file from host to docker and install requirements ADD ./det-yolov5-tmi /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ -RUN pip install ymir-exc && pip install -r /app/requirements.txt +RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ + && pip install -r /app/requirements.txt # Download pretrained weight and font file -RUN cd /app && bash data/scripts/download_weights.sh -RUN mkdir -p /root/.config/Ultralytics && \ - wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf +RUN cd /app && bash data/scripts/download_weights.sh \ + && mkdir -p /root/.config/Ultralytics \ + && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf # Make PYTHONPATH find local package ENV PYTHONPATH=. WORKDIR /app +RUN echo "python3 /app/start.py" > /usr/bin/start.sh CMD python3 /app/start.py \ No newline at end of file diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py index a2ec35a..d116aa5 100644 --- a/det-yolov5-tmi/models/common.py +++ b/det-yolov5-tmi/models/common.py @@ -26,7 +26,7 @@ make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, time_sync -from utils.activations import SiLU, Hardswish + def autopad(k, p=None): # kernel, padding # Pad to 'same' diff --git a/det-yolov5-tmi/requirements.txt b/det-yolov5-tmi/requirements.txt index 3e65c34..fa1d389 100755 --- a/det-yolov5-tmi/requirements.txt +++ b/det-yolov5-tmi/requirements.txt @@ -37,6 +37,6 @@ onnx-simplifier>=0.3.6 # ONNX simplifier thop # FLOPs computation # Ymir --------------------------------------- -imagesize # fast obtain image size without load image -nptyping # numpy type hint -easydict \ No newline at end of file +imagesize>=1.3.0 # fast obtain image size without load image +nptyping>=2.1.1 # numpy type hint +easydict>=1.9 \ No newline at end of file diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 9010340..68b5854 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -2,6 +2,7 @@ utils function for ymir and yolov5 """ import os.path as osp +import shutil from enum import IntEnum from typing import Any, List, Tuple @@ -186,12 +187,16 @@ def convert_ymir_to_yolov5(cfg: edict) -> None: convert ymir format dataset to yolov5 format generate data.yaml for training/mining/infer """ - data = dict(path=cfg.ymir.input.root_dir, - train=cfg.ymir.input.training_index_file, - val=cfg.ymir.input.val_index_file, - test=cfg.ymir.input.candidate_index_file, + + data = dict(path=cfg.ymir.output.root_dir, nc=len(cfg.param.class_names), names=cfg.param.class_names) + for split, prefix in zip(['train', 'val', 'test'], ['training', 'val', 'candidate']): + src_file = getattr(cfg.ymir.input, f'{prefix}_index_file') + if 
osp.exists(src_file): + shutil.copy(src_file, f'{cfg.ymir.output.root_dir}/{split}.tsv') + + data[split] = f'{split}.tsv' with open(osp.join(cfg.ymir.output.root_dir, 'data.yaml'), 'w') as fw: fw.write(yaml.safe_dump(data)) From 62056ea6346ae902760f9733633e518cea260f43 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 15 Jun 2022 18:22:12 +0800 Subject: [PATCH 012/204] add dockerfile for cuda11 --- det-mmdetection-tmi/docker/Dockerfile.cuda11 | 43 ++++++++++++++++++++ det-mmdetection-tmi/start.py | 17 +------- det-mmdetection-tmi/ymir_train.py | 2 +- 3 files changed, 46 insertions(+), 16 deletions(-) create mode 100644 det-mmdetection-tmi/docker/Dockerfile.cuda11 diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda11 b/det-mmdetection-tmi/docker/Dockerfile.cuda11 new file mode 100644 index 0000000..b00c88e --- /dev/null +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda11 @@ -0,0 +1,43 @@ +ARG PYTORCH="1.8.0" +ARG CUDA="11.1" +ARG CUDNN="8" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ARG MMCV="1.4.3" +ARG SERVER_MODE=prod + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" +ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" + +# Set timezone +RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ + && echo 'Asia/Shanghai' >/etc/timezone + +# Install apt package +RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install ymir-exc sdk and MMCV +RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ + && if [ "${SERVER_MODE}" = "dev" ]; then \ + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + else \ + pip install ymir-exc; \ + fi \ + && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.8.0/index.html \ + && conda clean --all + +# Install det-mmdetection-tmi +ADD det-mmdetection-tmi /app +WORKDIR /app +ENV FORCE_CUDA="1" +RUN pip install --no-cache-dir -r requirements/build.txt \ + && pip install --no-cache-dir -e . 
\ + && mkdir /img-man \ + && mv *-template.yaml /img-man + +RUN echo "python3 start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 16e8e89..553cfb5 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -1,7 +1,5 @@ import logging import os -import os.path as osp -import shutil import subprocess import sys @@ -11,8 +9,8 @@ from ymir_exc import env, monitor from ymir_exc import result_writer as rw -from mmdet.utils.util_ymir import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, download_weight_file, get_merged_config, - get_weight_file, get_ymir_process) +from mmdet.utils.util_ymir import (YmirStage, get_merged_config, + get_ymir_process) def start() -> int: @@ -47,11 +45,6 @@ def _run_training(cfg: edict) -> None: def _run_mining(cfg: edict()) -> None: - logging.info('convert ymir dataset to yolov5 dataset') - out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset') - convert_ymir_to_yolov5(cfg, out_dir) - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) - command = 'python3 mining/mining_cald.py' logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) @@ -59,12 +52,6 @@ def _run_mining(cfg: edict()) -> None: def _run_infer(cfg: edict) -> None: - # generate data.yaml for infer - logging.info('convert ymir dataset to yolov5 dataset') - out_dir = osp.join(cfg.ymir.output.root_dir, 'yolov5_dataset') - convert_ymir_to_yolov5(cfg, out_dir) - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) - N = dr.items_count(env.DatasetType.CANDIDATE) infer_result = dict() model = YmirYolov5(cfg) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 205bbc7..cd64cbd 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -115,7 +115,7 @@ def main(cfg: edict) -> int: if __name__ == '__main__': cfg = get_merged_config() - os.environ.setdefault('YMIR_MODELS_DIR','') + os.environ.setdefault('YMIR_MODELS_DIR',cfg.ymir.output.models_dir) os.environ.setdefault('COCO_EVAL_TMP_FILE', '') os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') sys.exit(main(cfg)) From fec9ca8e2e6364930f246c884ff58e9ac346b8c9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 15 Jun 2022 18:25:10 +0800 Subject: [PATCH 013/204] remove loguru --- det-yolov5-tmi/utils/datasets.py | 1 - 1 file changed, 1 deletion(-) diff --git a/det-yolov5-tmi/utils/datasets.py b/det-yolov5-tmi/utils/datasets.py index cb36851..d4bf7b9 100755 --- a/det-yolov5-tmi/utils/datasets.py +++ b/det-yolov5-tmi/utils/datasets.py @@ -31,7 +31,6 @@ from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, ymir_xyxy2xywh) from utils.torch_utils import torch_distributed_zero_first -from loguru import logger # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes From e56ea6e6f1f6963f7d4eda17fadaf206442a3d19 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 16 Jun 2022 15:52:58 +0800 Subject: [PATCH 014/204] update mxnet docker file --- live-code-executor/mxnet.dockerfile | 35 +++++++++++++++++++---------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile 
b/live-code-executor/mxnet.dockerfile index e55f478..59731e6 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -1,20 +1,31 @@ -ARG CUDA="11.2.0" +ARG CUDA="11.2.1" ARG CUDNN="8" ARG BUILD="runtime" # runtime/devel -ARG SYSTEM="ubuntu20.04" +ARG SYSTEM="ubuntu18.04" FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-${BUILD}-${SYSTEM} ARG MXNET="1.9.1" -ENV LANG=C.UTF-8 - -ARG SERVER_MODE=prod +ARG DEBIAN_FRONTEND="noninteractive" +ARG MINICONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh" +ENV LANG=C.UTF-8 +ENV PATH /opt/conda/bin:$PATH # install linux package, needs to fix GPG error first. RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \ apt-get update && \ - apt-get install -y git wget curl python3-dev gcc zip libglib2.0-0 libgl1-mesa-glx && \ - wget https://bootstrap.pypa.io/get-pip.py && \ - python3 get-pip.py + apt-get install -y git wget curl zip libglib2.0-0 libgl1-mesa-glx && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* && \ + wget "${MINICONDA_URL}" -O miniconda.sh -q && \ + mkdir -p /opt && \ + sh miniconda.sh -b -p /opt/conda && \ + rm miniconda.sh && \ + ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \ + echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \ + echo "conda activate base" >> ~/.bashrc && \ + find /opt/conda/ -follow -type f -name '*.a' -delete && \ + find /opt/conda/ -follow -type f -name '*.js.map' -delete && \ + /opt/conda/bin/conda clean -afy # Install python package # view https://mxnet.apache.org/versions/1.9.1/get_started for detail @@ -22,10 +33,10 @@ RUN pip3 install mxnet-cu112==${MXNET} loguru # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ - else \ - pip install ymir-exc; \ - fi + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + else \ + pip install ymir-exc; \ + fi # copy template training/mining/infer config file RUN mkdir -p /img-man From f9ed5952132b26b2d65f82ff6b57648e7c6935ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E4=BD=B3=E6=AC=A3?= Date: Thu, 16 Jun 2022 22:54:35 +0800 Subject: [PATCH 015/204] change miniconda link --- live-code-executor/mxnet.dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index 59731e6..a82758e 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -6,7 +6,7 @@ ARG SYSTEM="ubuntu18.04" FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-${BUILD}-${SYSTEM} ARG MXNET="1.9.1" ARG DEBIAN_FRONTEND="noninteractive" -ARG MINICONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh" +ARG MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh" ENV LANG=C.UTF-8 ENV PATH /opt/conda/bin:$PATH From 9972b9c7380fd2a2387f89f7d4125a625760249a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 17 Jun 2022 09:16:18 +0800 Subject: [PATCH 016/204] remove cuda111-dev, add empty line --- det-yolov5-tmi/cuda102.dockerfile | 2 +- det-yolov5-tmi/cuda111-devel.dockerfile | 43 ------------------------- det-yolov5-tmi/cuda111.dockerfile | 2 +- 3 files changed, 2 insertions(+), 45 
deletions(-) delete mode 100644 det-yolov5-tmi/cuda111-devel.dockerfile diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile index 22f7b98..49a29d3 100644 --- a/det-yolov5-tmi/cuda102.dockerfile +++ b/det-yolov5-tmi/cuda102.dockerfile @@ -38,4 +38,4 @@ ENV PYTHONPATH=. WORKDIR /app RUN echo "python3 /app/start.py" > /usr/bin/start.sh -CMD python3 /app/start.py \ No newline at end of file +CMD bash /usr/bin/start.sh diff --git a/det-yolov5-tmi/cuda111-devel.dockerfile b/det-yolov5-tmi/cuda111-devel.dockerfile deleted file mode 100644 index cd2eb03..0000000 --- a/det-yolov5-tmi/cuda111-devel.dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -ARG PYTORCH="1.8.0" -ARG CUDA="11.1" -ARG CUDNN="8" - -# cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!! -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel -ARG SERVER_MODE=prod - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" -ENV LANG=C.UTF-8 - -# Install linux package -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \ - apt-get update && apt-get install -y gnupg2 git libglib2.0-0 libgl1-mesa-glx \ - curl wget zip \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# install ymir-exc sdk -RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ - else \ - pip install ymir-exc; \ - fi - -# Copy file from host to docker and install requirements -ADD ./det-yolov5-tmi /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ - && pip install -r /app/requirements.txt - -# Download pretrained weight and font file -RUN cd /app && bash data/scripts/download_weights.sh \ - && mkdir -p /root/.config/Ultralytics \ - && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf - -# Make PYTHONPATH find local package -ENV PYTHONPATH=. - -WORKDIR /app -RUN echo "python3 /app/start.py" > /usr/bin/start.sh -CMD python3 /app/start.py \ No newline at end of file diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile index db9b53b..0c6e5dd 100644 --- a/det-yolov5-tmi/cuda111.dockerfile +++ b/det-yolov5-tmi/cuda111.dockerfile @@ -39,4 +39,4 @@ ENV PYTHONPATH=. 
WORKDIR /app RUN echo "python3 /app/start.py" > /usr/bin/start.sh -CMD python3 /app/start.py \ No newline at end of file +CMD bash /usr/bin/start.sh From ba8a738b3addf97a235f074f4e208270c8e456d7 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 18 Jun 2022 22:01:42 +0800 Subject: [PATCH 017/204] support ymir cfg-option and args-option --- .../{Dockerfile.cuda11 => Dockerfile.cuda111} | 15 ++-- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 69 +++++++++++++- det-mmdetection-tmi/requirements/runtime.txt | 1 + det-mmdetection-tmi/start.py | 4 +- det-mmdetection-tmi/tools/train.py | 5 +- det-mmdetection-tmi/ymir_train.py | 90 ++++++------------- 6 files changed, 108 insertions(+), 76 deletions(-) rename det-mmdetection-tmi/docker/{Dockerfile.cuda11 => Dockerfile.cuda111} (76%) diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda11 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 similarity index 76% rename from det-mmdetection-tmi/docker/Dockerfile.cuda11 rename to det-mmdetection-tmi/docker/Dockerfile.cuda111 index b00c88e..4b132f9 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda11 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -2,8 +2,9 @@ ARG PYTORCH="1.8.0" ARG CUDA="11.1" ARG CUDNN="8" -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +# mmcv>=1.3.17, <=1.5.0 ARG MMCV="1.4.3" ARG SERVER_MODE=prod @@ -21,8 +22,8 @@ RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build && rm -rf /var/lib/apt/lists/* # Install ymir-exc sdk and MMCV -RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ - && if [ "${SERVER_MODE}" = "dev" ]; then \ +RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ + if [ "${SERVER_MODE}" = "dev" ]; then \ pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ @@ -34,10 +35,10 @@ RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ ADD det-mmdetection-tmi /app WORKDIR /app ENV FORCE_CUDA="1" -RUN pip install --no-cache-dir -r requirements/build.txt \ +RUN pip install --no-cache-dir -r requirements/runtime.txt \ && pip install --no-cache-dir -e . 
\ && mkdir /img-man \ - && mv *-template.yaml /img-man - -RUN echo "python3 start.py" > /usr/bin/start.sh + && mv *-template.yaml /img-man \ + && echo "python3 start.py" > /usr/bin/start.sh + CMD bash /usr/bin/start.sh diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 3b5008b..6493f92 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -10,6 +10,8 @@ from urllib.parse import urlparse import mmcv +from mmcv import Config +from mmdet.apis import init_detector, inference_detector from easydict import EasyDict as edict from nptyping import NDArray, Shape, UInt8 from torch.hub import HASH_REGEX, _get_torch_home, download_url_to_file @@ -58,6 +60,57 @@ def get_merged_config() -> edict: merged_cfg.ymir = env.get_current_env() return merged_cfg +def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: + """ + - modify dataset config + - modify model output channel + """ + ### modify dataset config + ymir_ann_files = dict( + train=ymir_cfg.ymir.input.training_index_file, + val=ymir_cfg.ymir.input.val_index_file, + test=ymir_cfg.ymir.input.candidate_index_file + ) + + samples_per_gpu = ymir_cfg.param.samples_per_gpu + workers_per_gpu = ymir_cfg.param.workers_per_gpu + mmdet_cfg.data.samples_per_gpu = samples_per_gpu + mmdet_cfg.data.workers_per_gpu = workers_per_gpu + + for split in ['train','val','test']: + ymir_dataset_cfg=dict(type='YmirDataset', + ann_file=ymir_ann_files[split], + img_prefix=ymir_cfg.ymir.input.assets_dir, + ann_prefix=ymir_cfg.ymir.input.annotations_dir, + classes=ymir_cfg.param.class_names, + data_root=ymir_cfg.ymir.input.root_dir, + filter_empty_gt=False + ) + ### modify dataset config + mmdet_dataset_cfg = mmdet_cfg.data[split] + if isinstance(mmdet_dataset_cfg, (list, tuple)): + for x in mmdet_dataset_cfg: + x.update(ymir_dataset_cfg) + else: + src_dataset_type = mmdet_dataset_cfg.type + if src_dataset_type in ['CocoDataset']: + mmdet_dataset_cfg.update(ymir_dataset_cfg) + elif src_dataset_type in ['MultiImageMixDataset','RepeatDataset']: + mmdet_dataset_cfg.dataset.update(ymir_dataset_cfg) + else: + raise Exception(f'unsupported source dataset type {src_dataset_type}') + + ### modify model output channel + mmdet_model_cfg = mmdet_cfg.model.bbox_head + mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) + + ### epochs, checkpoint, tensorboard + mmdet_model_cfg.runner.max_epochs = ymir_cfg.param.max_epochs + mmdet_model_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir + tensorboard_logger = dict(type='TensorboardLoggerHook', + log_dir = ymir_cfg.ymir.output.tensorboard_dir) + mmdet_model_cfg.log_config['hooks'].append(tensorboard_logger) + return mmdet_cfg def get_weight_file(cfg: edict) -> str: """ @@ -146,4 +199,18 @@ def update_training_result_file(key_score): model_weight_file = osp.basename(weight_files[-1]) rw.write_training_result(model_names=[model_weight_file, osp.basename(model_config_file)], mAP=key_score, - classAPs=results_per_category) \ No newline at end of file + classAPs=results_per_category) + +class YmirModel: + def __init__(self, cfg:edict): + self.cfg = cfg + + # Specify the path to model config and checkpoint file + config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' + checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + + # build the model from a config file and a checkpoint file + self.model = init_detector(config_file, checkpoint_file, 
device='cuda:0') + + def infer(self, img): + return inference_detector(self.model, img) \ No newline at end of file diff --git a/det-mmdetection-tmi/requirements/runtime.txt b/det-mmdetection-tmi/requirements/runtime.txt index f7a2cc7..3c93f57 100644 --- a/det-mmdetection-tmi/requirements/runtime.txt +++ b/det-mmdetection-tmi/requirements/runtime.txt @@ -3,3 +3,4 @@ numpy pycocotools six terminaltables +easydict \ No newline at end of file diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 553cfb5..54af3aa 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -10,7 +10,7 @@ from ymir_exc import result_writer as rw from mmdet.utils.util_ymir import (YmirStage, get_merged_config, - get_ymir_process) + get_ymir_process, YmirModel) def start() -> int: @@ -54,7 +54,7 @@ def _run_mining(cfg: edict()) -> None: def _run_infer(cfg: edict) -> None: N = dr.items_count(env.DatasetType.CANDIDATE) infer_result = dict() - model = YmirYolov5(cfg) + model = YmirModel(cfg) idx = -1 monitor_gap = max(1, N // 100) diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index b9e9981..b454553 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -17,7 +17,7 @@ from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger, setup_multi_processes - +from mmdet.utils.util_ymir import modify_mmdet_config def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') @@ -98,6 +98,9 @@ def main(): args = parse_args() cfg = Config.fromfile(args.config) + # modify mmdet config from file + cfg = modify_mmdet_config(cfg) + if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index cd64cbd..f328eff 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -11,7 +11,7 @@ def main(cfg: edict) -> int: - # default ymir config + ### default ymir config gpu_id = cfg.param.get("gpu_id", '0') num_gpus = len(gpu_id.split(",")) if num_gpus == 0: @@ -27,82 +27,42 @@ def main(cfg: edict) -> int: if not weight_file: weight_file = download_weight_file(model) - # user define config + ### user define config learning_rate = cfg.param.learning_rate - epochs = cfg.param.epochs + epochs = cfg.param.max_epochs samples_per_gpu = cfg.param.samples_per_gpu workers_per_gpu = min(4, max(1, samples_per_gpu//2)) - supported_models = [] - if model.startswith("faster_rcnn"): - files = glob.glob( - osp.join('configs/faster_rcnn/faster_rcnn_*_ymir_coco.py')) - supported_models = ['faster_rcnn_r50_fpn', 'faster_rcnn_r101_fpn'] - elif model.startswith("yolox"): - files = glob.glob(osp.join('configs/yolox/yolox_*_8x8_300e_ymir_coco.py')) - supported_models = ['yolox_nano', 'yolox_tiny', - 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x'] - else: - files = glob.glob(osp.join('configs/*/*_ymir_coco.py')) - supported_models = [osp.basename(f) for f in files] - - assert model in supported_models, f'unknown model {model}, not in {supported_models}' - - # modify base config file - base_config_file = './configs/_base_/datasets/ymir_coco.py' - - modify_dict = dict( - classes=classes, - num_classes=num_classes, - max_epochs=epochs, - lr=learning_rate, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=workers_per_gpu, - data_root=cfg.ymir.input.root_dir, - img_prefix=cfg.ymir.input.assets_dir, - 
ann_prefix=cfg.ymir.input.annotations_dir, - train_ann_file=cfg.ymir.input.training_index_file, - val_ann_file=cfg.ymir.input.val_index_file, - tensorboard_dir=cfg.ymir.output.tensorboard_dir, - work_dir=cfg.ymir.output.models_dir, - checkpoints_path=weight_file - ) - - logging.info(f'modified config is {modify_dict}') - with open(base_config_file, 'r') as fp: - lines = fp.readlines() - - fw = open(base_config_file, 'w') - for line in lines: - for key in modify_dict: - if line.startswith((f"{key}=", f"{key} =")): - value = modify_dict[key] - if isinstance(value, str): - line = f"{key} = '{value}' \n" - else: - line = f"{key} = {value} \n" - break - fw.write(line) - fw.close() - - # train_config_file will use the config in base_config_file - train_config_file = '' - for f in files: - if osp.basename(f).startswith(model): - train_config_file = f + ### mmcv args config + config_file = cfg.param.get("config_file") + args_options = cfg.param.get("base_args",None) + cfg_options = cfg.param.get("cfg_options",None) monitor.write_monitor_logger(percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) work_dir = cfg.ymir.output.models_dir - if num_gpus == 1: - cmd = f"python tools/train.py {train_config_file} " + \ + if num_gpus == 0: + # view https://mmdetection.readthedocs.io/en/stable/1_exist_data_model.html#training-on-cpu + os.environ.setdefault('CUDA_VISIBLE_DEVICES',"-1") + cmd = f"python tools/train.py {config_file} " + \ + f"--work-dir {work_dir}" + elif num_gpus == 1: + cmd = f"python tools/train.py {config_file} " + \ f"--work-dir {work_dir} --gpu-id {gpu_id}" else: - os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id - cmd = f"./tools/dist_train.sh {train_config_file} {num_gpus} " + \ + os.environ.setdefault('CUDA_VISIBLE_DEVICES', gpu_id) + port = cfg.param.get('PORT') + os.environ.setdefault('PORT', port) + cmd = f"./tools/dist_train.sh {config_file} {num_gpus} " + \ f"--work-dir {work_dir}" + if args_options: + cmd +=f" {args_options}" + + if cfg_options: + cmd +=f" --cfg-options {cfg_options}" + logging.info(f"training command: {cmd}") subprocess.run(cmd.split(), check=True) @@ -116,6 +76,6 @@ def main(cfg: edict) -> int: if __name__ == '__main__': cfg = get_merged_config() os.environ.setdefault('YMIR_MODELS_DIR',cfg.ymir.output.models_dir) - os.environ.setdefault('COCO_EVAL_TMP_FILE', '') + os.environ.setdefault('COCO_EVAL_TMP_FILE', osp.join(cfg.ymir.output.root_dir,'eval_tmp.json')) os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') sys.exit(main(cfg)) From 748a522640d3c837080af38c602dd48cda53e950 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sun, 19 Jun 2022 14:56:20 +0800 Subject: [PATCH 018/204] support extend dataset format --- det-mmdetection-tmi/mmdet/datasets/ymir.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-mmdetection-tmi/mmdet/datasets/ymir.py b/det-mmdetection-tmi/mmdet/datasets/ymir.py index 5cbbbfa..42771fb 100644 --- a/det-mmdetection-tmi/mmdet/datasets/ymir.py +++ b/det-mmdetection-tmi/mmdet/datasets/ymir.py @@ -146,7 +146,7 @@ def get_txt_ann_info(self, txt_path): else: lines=[] for line in lines: - obj=[int(x) for x in line.strip().split(',')] + obj=[int(x) for x in line.strip().split(',')[0:5]] # YMIR category id starts from 0, coco from 1 category_id, xmin, ymin, xmax, ymax = obj bbox = [xmin, ymin, xmax, ymax] From ea6a52e027274c09aac661a2d20f369fc73c80c9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sun, 19 Jun 2022 17:21:43 +0800 Subject: [PATCH 019/204] update config --- 
det-mmdetection-tmi/docker/Dockerfile.cuda111 | 4 +-- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 25 ++++--------------- det-mmdetection-tmi/start.py | 3 ++- det-mmdetection-tmi/tools/train.py | 9 ++++--- det-mmdetection-tmi/ymir_infer.py | 16 ++++++++++++ det-mmdetection-tmi/ymir_train.py | 16 ++++++------ 6 files changed, 38 insertions(+), 35 deletions(-) create mode 100644 det-mmdetection-tmi/ymir_infer.py diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index 4b132f9..8441b6a 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -17,7 +17,7 @@ RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo 'Asia/Shanghai' >/etc/timezone # Install apt package -RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ +RUN apt-get update && apt-get install -y gcc ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* @@ -40,5 +40,5 @@ RUN pip install --no-cache-dir -r requirements/runtime.txt \ && mkdir /img-man \ && mv *-template.yaml /img-man \ && echo "python3 start.py" > /usr/bin/start.sh - + CMD bash /usr/bin/start.sh diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 6493f92..2a4ab09 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -11,7 +11,6 @@ import mmcv from mmcv import Config -from mmdet.apis import init_detector, inference_detector from easydict import EasyDict as edict from nptyping import NDArray, Shape, UInt8 from torch.hub import HASH_REGEX, _get_torch_home, download_url_to_file @@ -76,7 +75,7 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: workers_per_gpu = ymir_cfg.param.workers_per_gpu mmdet_cfg.data.samples_per_gpu = samples_per_gpu mmdet_cfg.data.workers_per_gpu = workers_per_gpu - + for split in ['train','val','test']: ymir_dataset_cfg=dict(type='YmirDataset', ann_file=ymir_ann_files[split], @@ -105,12 +104,12 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) ### epochs, checkpoint, tensorboard - mmdet_model_cfg.runner.max_epochs = ymir_cfg.param.max_epochs - mmdet_model_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir + mmdet_cfg.runner.max_epochs = ymir_cfg.param.max_epochs + mmdet_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir = ymir_cfg.ymir.output.tensorboard_dir) - mmdet_model_cfg.log_config['hooks'].append(tensorboard_logger) - return mmdet_cfg + mmdet_cfg.log_config['hooks'].append(tensorboard_logger) + return mmdet_cfg def get_weight_file(cfg: edict) -> str: """ @@ -200,17 +199,3 @@ def update_training_result_file(key_score): rw.write_training_result(model_names=[model_weight_file, osp.basename(model_config_file)], mAP=key_score, classAPs=results_per_category) - -class YmirModel: - def __init__(self, cfg:edict): - self.cfg = cfg - - # Specify the path to model config and checkpoint file - config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' - checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' - - # build the model from a config file and a checkpoint file - self.model = init_detector(config_file, checkpoint_file, 
device='cuda:0') - - def infer(self, img): - return inference_detector(self.model, img) \ No newline at end of file diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 54af3aa..89ea239 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -10,7 +10,8 @@ from ymir_exc import result_writer as rw from mmdet.utils.util_ymir import (YmirStage, get_merged_config, - get_ymir_process, YmirModel) + get_ymir_process) +from ymir_infer import YmirModel def start() -> int: diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index b454553..a65e130 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -17,7 +17,7 @@ from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger, setup_multi_processes -from mmdet.utils.util_ymir import modify_mmdet_config +from mmdet.utils.util_ymir import modify_mmdet_config, get_merged_config def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') @@ -96,11 +96,12 @@ def parse_args(): def main(): args = parse_args() - + ymir_cfg = get_merged_config() cfg = Config.fromfile(args.config) + print(cfg) # modify mmdet config from file - cfg = modify_mmdet_config(cfg) - + cfg = modify_mmdet_config(mmdet_cfg=cfg, ymir_cfg=ymir_cfg) + if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py new file mode 100644 index 0000000..07dd043 --- /dev/null +++ b/det-mmdetection-tmi/ymir_infer.py @@ -0,0 +1,16 @@ +from mmdet.apis import init_detector, inference_detector +from easydict import EasyDict as edict + +class YmirModel: + def __init__(self, cfg:edict): + self.cfg = cfg + + # Specify the path to model config and checkpoint file + config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' + checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + + # build the model from a config file and a checkpoint file + self.model = init_detector(config_file, checkpoint_file, device='cuda:0') + + def infer(self, img): + return inference_detector(self.model, img) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index f328eff..ec44eec 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -23,16 +23,16 @@ def main(cfg: edict) -> int: if num_classes==0: raise Exception('not find class_names in config!') - weight_file = get_weight_file(cfg) - if not weight_file: - weight_file = download_weight_file(model) + # weight_file = get_weight_file(cfg) + # if not weight_file: + # weight_file = download_weight_file(model) ### user define config - learning_rate = cfg.param.learning_rate - epochs = cfg.param.max_epochs + # learning_rate = cfg.param.learning_rate + # epochs = cfg.param.max_epochs - samples_per_gpu = cfg.param.samples_per_gpu - workers_per_gpu = min(4, max(1, samples_per_gpu//2)) + # samples_per_gpu = cfg.param.samples_per_gpu + # workers_per_gpu = min(4, max(1, samples_per_gpu//2)) ### mmcv args config config_file = cfg.param.get("config_file") @@ -59,7 +59,7 @@ def main(cfg: edict) -> int: if args_options: cmd +=f" {args_options}" - + if cfg_options: cmd +=f" --cfg-options {cfg_options}" From 533fef1db02eb39e4c255bcf5d67ed12b46caf9e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 20 Jun 2022 16:19:12 +0800 Subject: [PATCH 020/204] update mmdet --- 
det-mmdetection-tmi/mmdet/apis/train.py | 1 + det-mmdetection-tmi/mmdet/utils/util_ymir.py | 21 ++++++++++---------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/apis/train.py b/det-mmdetection-tmi/mmdet/apis/train.py index f2c14e9..ebc995d 100644 --- a/det-mmdetection-tmi/mmdet/apis/train.py +++ b/det-mmdetection-tmi/mmdet/apis/train.py @@ -188,6 +188,7 @@ def train_detector(model, dist=distributed, shuffle=False) eval_cfg = cfg.get('evaluation', {}) + eval_cfg['classwise'] = True # Whether to evaluating the AP for each class eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' eval_hook = DistEvalHook if distributed else EvalHook # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 2a4ab09..a806bd6 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -117,22 +117,23 @@ def get_weight_file(cfg: edict) -> str: find weight file in cfg.param.model_params_path or cfg.param.model_params_path """ if cfg.ymir.run_training: - model_params_path = cfg.param.pretrained_model_paths + model_params_path: List = cfg.param.pretrained_model_paths else: - model_params_path = cfg.param.model_params_path + model_params_path: List = cfg.param.model_params_path model_dir = osp.join(cfg.ymir.input.root_dir, cfg.ymir.input.models_dir) model_params_path = [ - p for p in model_params_path if osp.exists(osp.join(model_dir, p))] + osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pth')] - # choose weight file by priority, best.pt > xxx.pt - if 'best.pt' in model_params_path: - return osp.join(model_dir, 'best.pt') - else: - for f in model_params_path: - if f.endswith('.pt'): - return osp.join(model_dir, f) + # choose weight file by priority, best_xxx.pth > latest.pth > epoch_xxx.pth + best_pth_files = [f for f in model_params_path if f.startswith('best_')] + if len(best_pth_files) > 0: + return get_newest_file(best_pth_files) + + epoch_pth_files = [f for f in model_params_path if f.startswith('epoch_')] + if len(epoch_pth_files) > 0: + return get_newest_file(epoch_pth_files) return "" From 9d0ef65a4e5b369c66745f5ed432c6bde20686fd Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 20 Jun 2022 16:33:43 +0800 Subject: [PATCH 021/204] add gcc and make git clone faster --- live-code-executor/mxnet.dockerfile | 2 +- live-code-executor/ymir_start.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index a82758e..1ff0a66 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -13,7 +13,7 @@ ENV PATH /opt/conda/bin:$PATH # install linux package, needs to fix GPG error first. 
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \ apt-get update && \ - apt-get install -y git wget curl zip libglib2.0-0 libgl1-mesa-glx && \ + apt-get install -y git gcc wget curl zip libglib2.0-0 libgl1-mesa-glx && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ wget "${MINICONDA_URL}" -O miniconda.sh -q && \ diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py index 71adf5c..0ea1bd6 100644 --- a/live-code-executor/ymir_start.py +++ b/live-code-executor/ymir_start.py @@ -23,9 +23,9 @@ def main(): git_branch = executor_config.get('git_branch', '') if not git_branch: - cmd = f'git clone {git_url} /app' + cmd = f'git clone {git_url} --depth 1 /app' else: - cmd = f'git clone {git_url} -b {git_branch} /app' + cmd = f'git clone {git_url} --depth 1 -b {git_branch} /app' logger.info(f'clone code: {cmd}') subprocess.run(cmd.split(), check=True) From ac06c0ffaee2d377b479521d6327e7e2dd2d4e40 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 20 Jun 2022 17:56:08 +0800 Subject: [PATCH 022/204] add training ability --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index a806bd6..2d67bdf 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -129,11 +129,11 @@ def get_weight_file(cfg: edict) -> str: # choose weight file by priority, best_xxx.pth > latest.pth > epoch_xxx.pth best_pth_files = [f for f in model_params_path if f.startswith('best_')] if len(best_pth_files) > 0: - return get_newest_file(best_pth_files) + return max(best_pth_files, key=os.path.getctime) epoch_pth_files = [f for f in model_params_path if f.startswith('epoch_')] if len(epoch_pth_files) > 0: - return get_newest_file(epoch_pth_files) + return max(epoch_pth_files, key=os.path.getctime) return "" @@ -181,7 +181,7 @@ def update_training_result_file(key_score): results_per_category = mmcv.load(COCO_EVAL_TMP_FILE) work_dir = os.getenv('YMIR_MODELS_DIR') - if work_dir is None or osp.isdir(work_dir): + if work_dir is None or not osp.isdir(work_dir): raise Exception( f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {work_dir}') From 5402b3f4713f5de4021c2d88d1b81282dd432e08 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 21 Jun 2022 15:21:02 +0800 Subject: [PATCH 023/204] mmdet training model --- det-mmdetection-tmi/docker/Dockerfile.cuda111 | 2 +- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 17 ++++------------- det-mmdetection-tmi/requirements/runtime.txt | 6 +++++- det-mmdetection-tmi/training-template.yaml | 7 +++++++ det-mmdetection-tmi/ymir_infer.py | 5 +++-- det-mmdetection-tmi/ymir_train.py | 5 ++--- 6 files changed, 22 insertions(+), 20 deletions(-) create mode 100644 det-mmdetection-tmi/training-template.yaml diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index 8441b6a..42a9004 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -17,7 +17,7 @@ RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo 'Asia/Shanghai' >/etc/timezone # Install apt package -RUN apt-get update && apt-get install -y gcc ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ +RUN apt-get update && apt-get install -y build-essential ffmpeg libsm6 libxext6 git ninja-build 
libglib2.0-0 libsm6 libxrender-dev libxext6 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 2d67bdf..eb9ad3e 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -104,7 +104,8 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) ### epochs, checkpoint, tensorboard - mmdet_cfg.runner.max_epochs = ymir_cfg.param.max_epochs + if ymir_cfg.param.get('max_epochs',None): + mmdet_cfg.runner.max_epochs = ymir_cfg.param.max_epochs mmdet_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir = ymir_cfg.ymir.output.tensorboard_dir) @@ -186,17 +187,7 @@ def update_training_result_file(key_score): f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {work_dir}') # assert only one model config file in work_dir - model_config_file = glob.glob(osp.join(work_dir, '*.py'))[0] - weight_files = glob.glob(osp.join(work_dir, 'best_bbox_mAP_epoch_*.pth')) - if len(weight_files) == 0: - weight_files = glob.glob(osp.join(work_dir, 'epoch_*.pth')) - - if len(weight_files) == 0: - raise Exception(f'no weight file found in {work_dir}') - - # sort the weight files by time, use the latest file. - weight_files.sort(key=lambda fn: osp.getmtime(fn)) - model_weight_file = osp.basename(weight_files[-1]) - rw.write_training_result(model_names=[model_weight_file, osp.basename(model_config_file)], + result_files = glob.glob(osp.join(work_dir, '*')) + rw.write_training_result(model_names=[osp.basename(f) for f in result_files], mAP=key_score, classAPs=results_per_category) diff --git a/det-mmdetection-tmi/requirements/runtime.txt b/det-mmdetection-tmi/requirements/runtime.txt index 3c93f57..9754131 100644 --- a/det-mmdetection-tmi/requirements/runtime.txt +++ b/det-mmdetection-tmi/requirements/runtime.txt @@ -3,4 +3,8 @@ numpy pycocotools six terminaltables -easydict \ No newline at end of file +easydict +nptyping +imagesize>=1.3.0 +future +tensorboard>=2.5.0 diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml new file mode 100644 index 0000000..67b9aa3 --- /dev/null +++ b/det-mmdetection-tmi/training-template.yaml @@ -0,0 +1,7 @@ +samples_per_gpu: 2 +workers_per_gpu: 2 +max_epochs: 300 +config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' +args: '' +cfg_options: '' +port: 12345 diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 07dd043..6863f62 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -1,13 +1,14 @@ from mmdet.apis import init_detector, inference_detector from easydict import EasyDict as edict +from mmdet.utils.util_ymir import get_weight_file class YmirModel: def __init__(self, cfg:edict): self.cfg = cfg # Specify the path to model config and checkpoint file - config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' - checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + config_file = cfg.param.config_file + checkpoint_file = get_weight_file(cfg) # build the model from a config file and a checkpoint file self.model = init_detector(config_file, checkpoint_file, device='cuda:0') diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index ec44eec..f2fc959 100644 --- 
a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -19,7 +19,6 @@ def main(cfg: edict) -> int: classes = cfg.param.class_names num_classes = len(classes) - model = cfg.param.model if num_classes==0: raise Exception('not find class_names in config!') @@ -36,7 +35,7 @@ def main(cfg: edict) -> int: ### mmcv args config config_file = cfg.param.get("config_file") - args_options = cfg.param.get("base_args",None) + args_options = cfg.param.get("args",None) cfg_options = cfg.param.get("cfg_options",None) monitor.write_monitor_logger(percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) @@ -52,7 +51,7 @@ def main(cfg: edict) -> int: f"--work-dir {work_dir} --gpu-id {gpu_id}" else: os.environ.setdefault('CUDA_VISIBLE_DEVICES', gpu_id) - port = cfg.param.get('PORT') + port = cfg.param.get('port') os.environ.setdefault('PORT', port) cmd = f"./tools/dist_train.sh {config_file} {num_gpus} " + \ f"--work-dir {work_dir}" From 1aed247e897f57d9ba3fb78d9eed395d632c94cd Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 21 Jun 2022 17:25:45 +0800 Subject: [PATCH 024/204] add infer --- det-mmdetection-tmi/infer-template.yaml | 7 ++ .../mmdet/core/evaluation/eval_hooks.py | 12 ++- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 94 +++++++------------ det-mmdetection-tmi/start.py | 20 +++- det-mmdetection-tmi/training-template.yaml | 2 +- det-mmdetection-tmi/ymir_infer.py | 60 +++++++++++- det-mmdetection-tmi/ymir_train.py | 48 ++++------ 7 files changed, 141 insertions(+), 102 deletions(-) create mode 100644 det-mmdetection-tmi/infer-template.yaml diff --git a/det-mmdetection-tmi/infer-template.yaml b/det-mmdetection-tmi/infer-template.yaml new file mode 100644 index 0000000..8be36b9 --- /dev/null +++ b/det-mmdetection-tmi/infer-template.yaml @@ -0,0 +1,7 @@ +samples_per_gpu: 2 +workers_per_gpu: 2 +max_epochs: 300 +config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' +args_options: '' +cfg_options: '' +port: 12345 diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index 15c47bc..89bc7bb 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -8,8 +8,10 @@ from mmcv.runner import EvalHook as BaseEvalHook from torch.nn.modules.batchnorm import _BatchNorm from ymir_exc import monitor -from mmdet.utils.util_ymir import update_training_result_file -import os.path as osp + +from mmdet.utils.util_ymir import (YmirStage, get_ymir_process, + update_training_result_file) + def _calc_dynamic_intervals(start_interval, dynamic_interval_list): assert mmcv.is_list_of(dynamic_interval_list, tuple) @@ -47,7 +49,8 @@ def before_train_epoch(self, runner): def after_train_epoch(self, runner): """Report the training process for ymir""" - percent=0.95*(runner.epoch/runner.max_epochs) + percent = get_ymir_process( + stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) monitor.write_monitor_logger(percent=percent) super().after_train_epoch(runner) @@ -101,7 +104,8 @@ def before_train_epoch(self, runner): def after_train_epoch(self, runner): """Report the training process for ymir""" - percent=0.1+0.8*(runner.epoch/runner.max_epochs) + percent = get_ymir_process( + stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) monitor.write_monitor_logger(percent=percent) super().after_train_epoch(runner) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index eb9ad3e..79982e0 100644 --- 
a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -4,16 +4,13 @@ import glob import os import os.path as osp -import sys from enum import IntEnum -from typing import Any, List, Tuple -from urllib.parse import urlparse +from typing import Any, List import mmcv -from mmcv import Config from easydict import EasyDict as edict +from mmcv import Config from nptyping import NDArray, Shape, UInt8 -from torch.hub import HASH_REGEX, _get_torch_home, download_url_to_file from ymir_exc import env from ymir_exc import result_writer as rw @@ -59,12 +56,14 @@ def get_merged_config() -> edict: merged_cfg.ymir = env.get_current_env() return merged_cfg + def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: """ - modify dataset config - modify model output channel + - modify epochs, checkpoint, tensorboard config """ - ### modify dataset config + # modify dataset config ymir_ann_files = dict( train=ymir_cfg.ymir.input.training_index_file, val=ymir_cfg.ymir.input.val_index_file, @@ -76,17 +75,20 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: mmdet_cfg.data.samples_per_gpu = samples_per_gpu mmdet_cfg.data.workers_per_gpu = workers_per_gpu - for split in ['train','val','test']: - ymir_dataset_cfg=dict(type='YmirDataset', - ann_file=ymir_ann_files[split], - img_prefix=ymir_cfg.ymir.input.assets_dir, - ann_prefix=ymir_cfg.ymir.input.annotations_dir, - classes=ymir_cfg.param.class_names, - data_root=ymir_cfg.ymir.input.root_dir, - filter_empty_gt=False - ) - ### modify dataset config - mmdet_dataset_cfg = mmdet_cfg.data[split] + for split in ['train', 'val', 'test']: + ymir_dataset_cfg = dict(type='YmirDataset', + ann_file=ymir_ann_files[split], + img_prefix=ymir_cfg.ymir.input.assets_dir, + ann_prefix=ymir_cfg.ymir.input.annotations_dir, + classes=ymir_cfg.param.class_names, + data_root=ymir_cfg.ymir.input.root_dir, + filter_empty_gt=False + ) + # modify dataset config for `split` + mmdet_dataset_cfg = mmdet_cfg.data.get(split, None) + if mmdet_dataset_cfg is None: + continue + if isinstance(mmdet_dataset_cfg, (list, tuple)): for x in mmdet_dataset_cfg: x.update(ymir_dataset_cfg) @@ -94,24 +96,26 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: src_dataset_type = mmdet_dataset_cfg.type if src_dataset_type in ['CocoDataset']: mmdet_dataset_cfg.update(ymir_dataset_cfg) - elif src_dataset_type in ['MultiImageMixDataset','RepeatDataset']: + elif src_dataset_type in ['MultiImageMixDataset', 'RepeatDataset']: mmdet_dataset_cfg.dataset.update(ymir_dataset_cfg) else: - raise Exception(f'unsupported source dataset type {src_dataset_type}') + raise Exception( + f'unsupported source dataset type {src_dataset_type}') - ### modify model output channel + # modify model output channel mmdet_model_cfg = mmdet_cfg.model.bbox_head mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) - ### epochs, checkpoint, tensorboard - if ymir_cfg.param.get('max_epochs',None): + # modify epochs, checkpoint, tensorboard config + if ymir_cfg.param.get('max_epochs', None): mmdet_cfg.runner.max_epochs = ymir_cfg.param.max_epochs mmdet_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir tensorboard_logger = dict(type='TensorboardLoggerHook', - log_dir = ymir_cfg.ymir.output.tensorboard_dir) + log_dir=ymir_cfg.ymir.output.tensorboard_dir) mmdet_cfg.log_config['hooks'].append(tensorboard_logger) return mmdet_cfg + def get_weight_file(cfg: edict) -> str: """ return the weight file path by priority @@ -122,55 
+126,21 @@ def get_weight_file(cfg: edict) -> str: else: model_params_path: List = cfg.param.model_params_path - model_dir = osp.join(cfg.ymir.input.root_dir, - cfg.ymir.input.models_dir) + model_dir = cfg.ymir.input.models_dir model_params_path = [ - osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pth')] + osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.pth','.pt'))] # choose weight file by priority, best_xxx.pth > latest.pth > epoch_xxx.pth - best_pth_files = [f for f in model_params_path if f.startswith('best_')] + best_pth_files = [f for f in model_params_path if osp.basename(f).startswith('best_')] if len(best_pth_files) > 0: return max(best_pth_files, key=os.path.getctime) - epoch_pth_files = [f for f in model_params_path if f.startswith('epoch_')] + epoch_pth_files = [f for f in model_params_path if osp.basename(f).startswith('epoch_')] if len(epoch_pth_files) > 0: return max(epoch_pth_files, key=os.path.getctime) - return "" - -def download_weight_file(model: str) -> str: - """ - download weight file from web if not exist. - """ - model_to_url = dict( - faster_rcnn_r50_fpn='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', - faster_rcnn_r101_fpn='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth', - yolox_tiny='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', - yolox_s='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth', - yolox_l='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth', - yolox_x='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth', - yolox_nano='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth' - ) - - url = model_to_url[model] - torch_home = _get_torch_home() - model_dir = os.path.join(torch_home, 'checkpoints') - - os.makedirs(model_dir, exist_ok=True) - parts = urlparse(url) - filename = os.path.basename(parts.path) - cached_file = os.path.join(model_dir, filename) - - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format( - url, cached_file)) - r = HASH_REGEX.search(filename) # r is Optional[Match[str]] - hash_prefix = r.group(1) if r else None - download_url_to_file( - url, cached_file, hash_prefix, progress=True) - - return cached_file + return "" def update_training_result_file(key_score): diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 89ea239..44babba 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -1,9 +1,11 @@ +import glob import logging import os import subprocess import sys import cv2 +import yaml from easydict import EasyDict as edict from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor @@ -11,7 +13,7 @@ from mmdet.utils.util_ymir import (YmirStage, get_merged_config, get_ymir_process) -from ymir_infer import YmirModel +from ymir_infer import YmirModel, mmdet_result_to_ymir def start() -> int: @@ -41,6 +43,19 @@ def _run_training(cfg: edict) -> None: 
command = 'python3 ymir_train.py' logging.info(f'start training: {command}') subprocess.run(command.split(), check=True) + + work_dir = cfg.ymir.output.models_dir + result_files = glob.glob(os.path.join(work_dir, '*')) + + training_result_file = cfg.ymir.output.training_result_file + with open(training_result_file, 'r') as fp: + best_result = yaml.safe_load(fp) + + # save the last checkpoint + rw.write_training_result(model_names=[os.path.basename(f) for f in result_files], + mAP=best_result['map'], + classAPs=best_result['class_aps']) + # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) @@ -58,11 +73,12 @@ def _run_infer(cfg: edict) -> None: model = YmirModel(cfg) idx = -1 + # write infer result monitor_gap = max(1, N // 100) for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): img = cv2.imread(asset_path) result = model.infer(img) - infer_result[asset_path] = result + infer_result[asset_path] = mmdet_result_to_ymir(result, cfg.param.class_names) idx += 1 if idx % monitor_gap == 0: diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index 67b9aa3..8be36b9 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -2,6 +2,6 @@ samples_per_gpu: 2 workers_per_gpu: 2 max_epochs: 300 config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' -args: '' +args_options: '' cfg_options: '' port: 12345 diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 6863f62..47e47ed 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -1,17 +1,71 @@ -from mmdet.apis import init_detector, inference_detector +import argparse +from typing import Any, List + +import numpy as np from easydict import EasyDict as edict +from mmcv import DictAction +from nptyping import NDArray, Shape +from ymir_exc import result_writer as rw + +from mmdet.apis import inference_detector, init_detector from mmdet.utils.util_ymir import get_weight_file +DETECTION_RESULT = NDArray[Shape['*,5'], Any] + + +def parse_option(cfg_options: str) -> dict: + parser = argparse.ArgumentParser(description='parse cfg options') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + + args = parser.parse_args(f'--cfg-options {cfg_options}'.split()) + return args.cfg_options + + +def mmdet_result_to_ymir(results: List[DETECTION_RESULT], + class_names: List[str]) -> List[rw.Annotation]: + ann_list = [] + for idx, result in enumerate(results): + for line in result: + if any(np.isinf(line)): + continue + x1, y1, x2, y2, score = line + ann = rw.Annotation( + class_name=class_names[idx], + score=score, + box=rw.Box(x=round(x1), + y=round(y1), + w=round(x2-x1), + h=round(y2-y1)) + ) + ann_list.append(ann) + return ann_list + + class YmirModel: - def __init__(self, cfg:edict): + def __init__(self, cfg: edict): self.cfg = cfg # Specify the path to model config and checkpoint file config_file = cfg.param.config_file checkpoint_file = get_weight_file(cfg) + cfg_options = parse_option( + cfg.param.cfg_options) if cfg.param.cfg_options else None + # current infer can only use one gpu!!! + gpu_ids = cfg.param.gpu_id + gpu_id = gpu_ids.split(',')[0] # build the model from a config file and a checkpoint file - self.model = init_detector(config_file, checkpoint_file, device='cuda:0') + self.model = init_detector( + config_file, checkpoint_file, device=f'cuda:{gpu_id}', cfg_options=cfg_options) def infer(self, img): return inference_detector(self.model, img) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index f2fc959..a39e64b 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -1,4 +1,3 @@ -import glob import logging import os import os.path as osp @@ -7,11 +6,13 @@ from easydict import EasyDict as edict from ymir_exc import monitor -from mmdet.utils.util_ymir import get_merged_config, get_weight_file, download_weight_file, get_ymir_process, YmirStage, update_training_result_file + +from mmdet.utils.util_ymir import (YmirStage, get_merged_config, + get_ymir_process) def main(cfg: edict) -> int: - ### default ymir config + # default ymir config gpu_id = cfg.param.get("gpu_id", '0') num_gpus = len(gpu_id.split(",")) if num_gpus == 0: @@ -19,31 +20,21 @@ def main(cfg: edict) -> int: classes = cfg.param.class_names num_classes = len(classes) - if num_classes==0: + if num_classes == 0: raise Exception('not find class_names in config!') - # weight_file = get_weight_file(cfg) - # if not weight_file: - # weight_file = download_weight_file(model) - - ### user define config - # learning_rate = cfg.param.learning_rate - # epochs = cfg.param.max_epochs - - # samples_per_gpu = cfg.param.samples_per_gpu - # workers_per_gpu = min(4, max(1, samples_per_gpu//2)) - - ### mmcv args config + # mmcv args config config_file = cfg.param.get("config_file") - args_options = cfg.param.get("args",None) - cfg_options = cfg.param.get("cfg_options",None) + args_options = cfg.param.get("args_options", None) + cfg_options = cfg.param.get("cfg_options", None) - monitor.write_monitor_logger(percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) + monitor.write_monitor_logger( + percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) work_dir = cfg.ymir.output.models_dir if num_gpus == 0: # view https://mmdetection.readthedocs.io/en/stable/1_exist_data_model.html#training-on-cpu - os.environ.setdefault('CUDA_VISIBLE_DEVICES',"-1") + os.environ.setdefault('CUDA_VISIBLE_DEVICES', "-1") cmd = f"python tools/train.py {config_file} " + \ f"--work-dir {work_dir}" elif num_gpus == 1: @@ -57,24 +48,21 @@ def main(cfg: edict) -> int: 
f"--work-dir {work_dir}" if args_options: - cmd +=f" {args_options}" + cmd += f" {args_options}" if cfg_options: - cmd +=f" --cfg-options {cfg_options}" + cmd += f" --cfg-options {cfg_options}" logging.info(f"training command: {cmd}") subprocess.run(cmd.split(), check=True) - - # eval_hooks will generate training_result_file if current map is best. - # create a fake map = 0 if no training_result_file generate in eval_hooks - if not osp.exists(cfg.ymir.output.training_result_file): - update_training_result_file(0) - + logging.info(f"training finished") return 0 + if __name__ == '__main__': cfg = get_merged_config() - os.environ.setdefault('YMIR_MODELS_DIR',cfg.ymir.output.models_dir) - os.environ.setdefault('COCO_EVAL_TMP_FILE', osp.join(cfg.ymir.output.root_dir,'eval_tmp.json')) + os.environ.setdefault('YMIR_MODELS_DIR', cfg.ymir.output.models_dir) + os.environ.setdefault('COCO_EVAL_TMP_FILE', osp.join( + cfg.ymir.output.root_dir, 'eval_tmp.json')) os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') sys.exit(main(cfg)) From 0ba4479b790470da9e61d035ec4739f3ff6db479 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 21 Jun 2022 17:32:21 +0800 Subject: [PATCH 025/204] update readme --- det-mmdetection-tmi/README_ymir.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/det-mmdetection-tmi/README_ymir.md b/det-mmdetection-tmi/README_ymir.md index de86768..90a84b0 100644 --- a/det-mmdetection-tmi/README_ymir.md +++ b/det-mmdetection-tmi/README_ymir.md @@ -6,3 +6,7 @@ - modify `mmdet/datasets/coco.py`, save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` with json format - modify `mmdet/core/evaluation/eval_hooks.py`, write training result file and monitor task process - modify `mmdet/datasets/__init__.py` and add `mmdet/datasets/ymir.py`, add class `YmirDataset` to load YMIR dataset. 
+- modify `mmdet/apis/train.py`, set `eval_cfg['classwise'] = True` for class-wise evaluation +- add `mmdet/utils/util_ymir.py` for ymir training/infer/mining +- add `ymir_infer.py` for infer and mining +- add `ymir_train.py` modify `tools/train.py` to update the mmcv config for training From a8a8eddec68b6634d2e4b0ef154295a101b59a7a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 22 Jun 2022 18:12:21 +0800 Subject: [PATCH 026/204] update evaluation --- det-mmdetection-tmi/mmdet/apis/train.py | 1 - det-mmdetection-tmi/mmdet/utils/util_ymir.py | 23 +++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/apis/train.py b/det-mmdetection-tmi/mmdet/apis/train.py index ebc995d..f2c14e9 100644 --- a/det-mmdetection-tmi/mmdet/apis/train.py +++ b/det-mmdetection-tmi/mmdet/apis/train.py @@ -188,7 +188,6 @@ def train_detector(model, dist=distributed, shuffle=False) eval_cfg = cfg.get('evaluation', {}) - eval_cfg['classwise'] = True # Whether to evaluating the AP for each class eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' eval_hook = DistEvalHook if distributed else EvalHook # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 79982e0..c04013c 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -70,6 +70,8 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: test=ymir_cfg.ymir.input.candidate_index_file ) + # validation may augment the image and use more gpu + # so set smaller samples_per_gpu for validation samples_per_gpu = ymir_cfg.param.samples_per_gpu workers_per_gpu = ymir_cfg.param.workers_per_gpu mmdet_cfg.data.samples_per_gpu = samples_per_gpu @@ -82,7 +84,11 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: ann_prefix=ymir_cfg.ymir.input.annotations_dir, classes=ymir_cfg.param.class_names, data_root=ymir_cfg.ymir.input.root_dir, - filter_empty_gt=False + filter_empty_gt=False, + samples_per_gpu=samples_per_gpu if split == 'train' else max( + 1, samples_per_gpu//2), + workers_per_gpu=workers_per_gpu if split == 'train' else max( + 1, workers_per_gpu//2) ) # modify dataset config for `split` mmdet_dataset_cfg = mmdet_cfg.data.get(split, None) @@ -113,6 +119,12 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir=ymir_cfg.ymir.output.tensorboard_dir) mmdet_cfg.log_config['hooks'].append(tensorboard_logger) + + # modify evaluation and interval + interval = max(1, mmdet_cfg.runner.max_epoch//30) + mmdet_cfg.evaluation.interval = interval + # Whether to evaluating the AP for each class + mmdet_cfg.evaluation.classwise = True return mmdet_cfg @@ -128,18 +140,19 @@ def get_weight_file(cfg: edict) -> str: model_dir = cfg.ymir.input.models_dir model_params_path = [ - osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.pth','.pt'))] + osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.pth', '.pt'))] # choose weight file by priority, best_xxx.pth > latest.pth > epoch_xxx.pth - best_pth_files = [f for f in model_params_path if osp.basename(f).startswith('best_')] + best_pth_files = [ + f for f in model_params_path if osp.basename(f).startswith('best_')] if len(best_pth_files) > 0: return max(best_pth_files, 
key=os.path.getctime) - epoch_pth_files = [f for f in model_params_path if f.startswith('epoch_')] + epoch_pth_files = [ + f for f in model_params_path if osp.basename(f).startswith('epoch_')] if len(epoch_pth_files) > 0: return max(epoch_pth_files, key=os.path.getctime) - return "" From 1bff027b7407643e238a6342bc7ae93ae4666962 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 24 Jun 2022 11:02:36 +0800 Subject: [PATCH 027/204] update --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index c04013c..96aa821 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -2,6 +2,7 @@ utils function for ymir and yolov5 """ import glob +import logging import os import os.path as osp from enum import IntEnum @@ -157,6 +158,7 @@ def get_weight_file(cfg: edict) -> str: def update_training_result_file(key_score): + logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') if COCO_EVAL_TMP_FILE is None: raise Exception( From 5f435e974ee1f98350b4d871b2c0345a2dea93c3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 24 Jun 2022 11:09:31 +0800 Subject: [PATCH 028/204] fix training bug --- det-yolov4-training/convert_label_ark2txt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-yolov4-training/convert_label_ark2txt.py b/det-yolov4-training/convert_label_ark2txt.py index 509e0b2..0304ba9 100755 --- a/det-yolov4-training/convert_label_ark2txt.py +++ b/det-yolov4-training/convert_label_ark2txt.py @@ -40,7 +40,7 @@ def _convert_annotations(index_file_path: str, dst_annotations_dir: str) -> None output_list = [] for each_line in txt_content: - each_line = [int(each) for each in each_line.split(",")] + each_line = [int(each) for each in each_line.split(",")[0:5]] cls, xmin, ymin, xmax, ymax, *_ = each_line xmin = max(0, xmin) From 6f1e49a45fd8806473ed22f27fce281b3ee022d5 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 24 Jun 2022 16:17:28 +0800 Subject: [PATCH 029/204] add README.MD --- README.MD | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 README.MD diff --git a/README.MD b/README.MD new file mode 100644 index 0000000..aa0d4ae --- /dev/null +++ b/README.MD @@ -0,0 +1,64 @@ +# ymir-executor usage guide + +## det-yolov4-training + +- training image for yolov4, built on the mxnet and darknet frameworks; the default cuda version is `10.1`, so it cannot run directly on newer GPUs such as the GTX3080/GTX3090, and the dockerfile needs to be modified to raise the cuda version to 11.1 or above and to update the other dependencies. + +## det-yolov4-mining + +- mining and inference image for yolov4, the counterpart of det-yolov4-training + +## det-yolov5-tmi + +- training, mining and inference image for yolov5; pretrained weights are downloaded from github during training + +- yolov5-FAQ + + - weight download fails: download the weights in advance and copy them into the image + +## live-code-executor + +- clones code from the web into the image via `git_url` and `git_branch`, then runs it + + - see [live-code](https://github.com/IndustryEssentials/ymir-remote-git) + +## det-mmdetection-tmi + +- training, mining and inference image for mmdetection, still under development + +# how to build your own ymir-executor + +- [ymir-executor development guide](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) + +# FAQ + +- apt or pip installation is slow or fails + + - use a mirror inside China, e.g. add the following commands to the docker file + + ``` + RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list + + RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple + ``` + +- docker build fails because the docker file cannot be found, or a `COPY/ADD` step errors out + + - go back to the project root (or the root directory the docker file expects) and make sure every file and folder referenced by `COPY/ADD` in the docker file is accessible; take yolov5 as an example:
``` + cd ymir-executor + + docker build -t ymir-executor/yolov5 . -f det-yolov5-tmi/cuda111.dockerfile + ``` + +- files under `/in` and `/out` are cleaned up after the image finishes running + + - to save disk space, the ymir system deletes unnecessary files there after a task ends; to keep them, modify `ymir/command/mir/tools/command_run_in_out.py` when deploying ymir and comment out the `_cleanup(work_dir=work_dir)` call, then rebuild the backend image + + ``` + cd ymir + docker build -t industryessentials/ymir-backend --build-arg PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple --build-arg SERVER_MODE='dev' -f Dockerfile.backend . + + docker-compose down -v && docker-compose up -d + ``` From 2f4ed3eae50a631f727cf5e2e717de8743fa952c Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 24 Jun 2022 16:19:28 +0800 Subject: [PATCH 030/204] add README.MD --- README.MD | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.MD b/README.MD index aa0d4ae..81e0077 100644 --- a/README.MD +++ b/README.MD @@ -20,16 +20,19 @@ - clones code from the web into the image via `git_url` and `git_branch`, then runs it - - see [live-code](https://github.com/IndustryEssentials/ymir-remote-git) +- see [live-code](https://github.com/IndustryEssentials/ymir-remote-git) ## det-mmdetection-tmi - training, mining and inference image for mmdetection, still under development + +## how to build your own ymir-executor - [ymir-executor development guide](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) +--- + # FAQ From bbf824b067f4c63e1f4ed8ad888d7f368c4dde9f Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 24 Jun 2022 18:58:37 +0800 Subject: [PATCH 031/204] fix yolov5 nptyping problem --- det-yolov5-tmi/requirements.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/det-yolov5-tmi/requirements.txt b/det-yolov5-tmi/requirements.txt index fa1d389..70af2f8 100755 --- a/det-yolov5-tmi/requirements.txt +++ b/det-yolov5-tmi/requirements.txt @@ -2,7 +2,7 @@ # Base ---------------------------------------- matplotlib>=3.2.2 -numpy>=1.18.5 +numpy>=1.20.0 opencv-python>=4.1.2 Pillow>=7.1.2 PyYAML>=5.3.1 @@ -39,4 +39,5 @@ thop # FLOPs computation # Ymir --------------------------------------- imagesize>=1.3.0 # fast obtain image size without load image nptyping>=2.1.1 # numpy type hint -easydict>=1.9 \ No newline at end of file +typing-extensions>=4.2.0 +easydict>=1.9 From 8882e6088eeee0470d1f6c195169fea83395df5a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 27 Jun 2022 16:49:54 +0800 Subject: [PATCH 032/204] fix extra tag --- det-yolov5-tmi/utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-yolov5-tmi/utils/datasets.py b/det-yolov5-tmi/utils/datasets.py index d4bf7b9..28a25b9 100755 --- a/det-yolov5-tmi/utils/datasets.py +++ b/det-yolov5-tmi/utils/datasets.py @@ -903,7 +903,7 @@ def verify_image_label(args): if os.path.isfile(lb_file): nf = 1 # label found with open(lb_file) as f: - lb = [x.split(',') for x in f.read().strip().splitlines() if len(x)] + lb = [x.split(',')[0:5] for x in f.read().strip().splitlines() if len(x)] nl = len(lb) if nl: From 78c06edaa93a700a38a2e66aba1efbb71018e6f3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 28 Jun 2022 18:08:03 +0800 Subject: [PATCH 033/204] no merge, support for ymir1.2.0 --- det-yolov5-tmi/infer-template.yaml | 8 +++--- det-yolov5-tmi/mining-template.yaml | 8 +++--- det-yolov5-tmi/start.py | 9 ++++-- det-yolov5-tmi/train.py | 6 ++-- det-yolov5-tmi/training-template.yaml | 10 ++++--- det-yolov5-tmi/utils/metrics.py | 2 +- det-yolov5-tmi/utils/ymir_yolov5.py | 40 +++++++++++++-------------- 7 files changed, 46 insertions(+), 37 deletions(-) diff --git
a/det-yolov5-tmi/infer-template.yaml b/det-yolov5-tmi/infer-template.yaml index 7574512..89dcc96 100644 --- a/det-yolov5-tmi/infer-template.yaml +++ b/det-yolov5-tmi/infer-template.yaml @@ -2,10 +2,10 @@ # after build image, it should at /img-man/infer-template.yaml # key: gpu_id, task_id, model_params_path, class_names should be preserved -gpu_id: '0' -task_id: 'default-infer-task' -model_params_path: [] -class_names: [] +# gpu_id: '0' +# task_id: 'default-infer-task' +# model_params_path: [] +# class_names: [] img_size: 640 conf_thres: 0.25 diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml index 5f2a3b2..20106dc 100644 --- a/det-yolov5-tmi/mining-template.yaml +++ b/det-yolov5-tmi/mining-template.yaml @@ -2,10 +2,10 @@ # after build image, it should at /img-man/mining-template.yaml # key: gpu_id, task_id, model_params_path, class_names should be preserved -gpu_id: '0' -task_id: 'default-training-task' -model_params_path: [] -class_names: [] +# gpu_id: '0' +# task_id: 'default-training-task' +# model_params_path: [] +# class_names: [] img_size: 640 conf_thres: 0.25 diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index ba06400..7b687e9 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -50,6 +50,8 @@ def _run_training(cfg: edict) -> None: batch_size = cfg.param.batch_size model = cfg.param.model img_size = cfg.param.img_size + save_period = cfg.param.save_period + args_options = cfg.param.args_options weights = get_weight_file(cfg) if not weights: # download pretrained weight @@ -59,8 +61,11 @@ def _run_training(cfg: edict) -> None: command = f'python3 train.py --epochs {epochs} ' + \ f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ - f'--img-size {img_size} --hyp data/hyps/hyp.scratch-low.yaml ' + \ - '--exist-ok' + f'--img-size {img_size} ' + \ + f'--save-period {save_period}' + if args_options: + command += f" {args_options}" + logging.info(f'start training: {command}') subprocess.run(command.split(), check=True) diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 6dd190e..7fcbbce 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -413,12 +413,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Save last, best and delete torch.save(ckpt, last) - write_ymir_training_result(ymir_cfg, results, maps, rewrite=False) if best_fitness == fi: torch.save(ckpt, best) - write_ymir_training_result(ymir_cfg, results, maps, rewrite=True) if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): torch.save(ckpt, w / f'epoch{epoch}.pt') + weight_file = str(w / f'epoch{epoch}.pt') + write_ymir_training_result(ymir_cfg, map50=results[2], epoch=epoch, weight_file=weight_file) del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) @@ -465,6 +465,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") torch.cuda.empty_cache() + # save the best and last weight file with other files in models_dir + write_ymir_training_result(ymir_cfg, map50=best_fitness, epoch=epochs, weight_file='') return results diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index 8cacec8..72356b7 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -2,13 +2,15 @@ # after build image, it should at /img-man/training-template.yaml # key: 
gpu_id, task_id, pretrained_model_paths, class_names should be preserved -gpu_id: '0' -task_id: 'default-training-task' -pretrained_model_paths: [] -class_names: [] +# gpu_id: '0' +# task_id: 'default-training-task' +# pretrained_model_paths: [] +# class_names: [] model: 'yolov5s' batch_size: 16 epochs: 300 img_size: 640 opset: 11 +args_options: '--exist-ok' +save_period: 10 diff --git a/det-yolov5-tmi/utils/metrics.py b/det-yolov5-tmi/utils/metrics.py index 857fa5d..48db16f 100644 --- a/det-yolov5-tmi/utils/metrics.py +++ b/det-yolov5-tmi/utils/metrics.py @@ -14,7 +14,7 @@ def fitness(x): # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + w = [0.0, 0.0, 1.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] return (x[:, :4] * w).sum(1) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 68b5854..ffcb39d 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -1,6 +1,7 @@ """ utils function for ymir and yolov5 """ +import glob import os.path as osp import shutil from enum import IntEnum @@ -62,6 +63,7 @@ def get_merged_config() -> edict: merged_cfg.ymir = env.get_current_env() return merged_cfg + def get_weight_file(cfg: edict) -> str: """ return the weight file path by priority @@ -202,31 +204,29 @@ def convert_ymir_to_yolov5(cfg: edict) -> None: fw.write(yaml.safe_dump(data)) -def write_ymir_training_result(cfg: edict, results: Tuple, maps: NDArray, rewrite=False) -> int: +def write_ymir_training_result(cfg: edict, + map50: float, + epoch: int, + weight_file: str) -> int: """ cfg: ymir config results: (mp, mr, map50, map, loss) maps: map@0.5:0.95 for all classes - rewrite: set true to ensure write the best result + epoch: stage + weight_file: saved weight files, empty weight_file will save all files """ - if not rewrite: - training_result_file = cfg.ymir.output.training_result_file - if osp.exists(training_result_file): - return 0 - model = cfg.param.model - class_names = cfg.param.class_names - mp = results[0] # mean of precision - mr = results[1] # mean of recall - map50 = results[2] # mean of ap@0.5 - map = results[3] # mean of ap@0.5:0.95 - # use `rw.write_training_result` to save training result - rw.write_training_result(model_names=[f'{model}.yaml', 'best.pt', 'last.pt', 'best.onnx'], - mAP=float(map), - mAP50=float(map50), - precision=float(mp), - recall=float(mr), - classAPs={class_name: v - for class_name, v in zip(class_names, maps.tolist())}) + if weight_file: + rw.write_model_stage(stage_name=f"{model}_{epoch}", + files=[weight_file], + mAP=float(map50)) + else: + # save other files with + files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*')) + if not f.endswith('.pt')] + ['last.pt', 'best.pt'] + + rw.write_model_stage(stage_name=f"{model}_last_and_best", + files=files, + mAP=float(map50)) return 0 From 8a01ee0c7e57c34bdfbb51f42e70ef3e83df0e59 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 30 Jun 2022 18:03:50 +0800 Subject: [PATCH 034/204] fix spell error --- det-yolov5-tmi/utils/ymir_yolov5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index ffcb39d..db6e183 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -70,7 +70,7 @@ def get_weight_file(cfg: edict) -> str: find weight file in cfg.param.model_params_path or cfg.param.model_params_path """ if 
cfg.ymir.run_training: - model_params_path = cfg.param.pretrained_model_paths + model_params_path = cfg.param.get('pretrained_model_params',[]) else: model_params_path = cfg.param.model_params_path From 9b825acaa928a3929d47b457faecda0f8635a0b8 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 30 Jun 2022 18:05:36 +0800 Subject: [PATCH 035/204] fix spell error --- det-yolov5-tmi/training-template.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index 72356b7..c6d0ee4 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -1,10 +1,10 @@ # training template for your executor app # after build image, it should at /img-man/training-template.yaml -# key: gpu_id, task_id, pretrained_model_paths, class_names should be preserved +# key: gpu_id, task_id, pretrained_model_params, class_names should be preserved # gpu_id: '0' # task_id: 'default-training-task' -# pretrained_model_paths: [] +# pretrained_model_params: [] # class_names: [] model: 'yolov5s' From 22f3e4587fe7e7f675ba061f6dfb8c754312bb97 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 30 Jun 2022 18:56:32 +0800 Subject: [PATCH 036/204] fix abs_path file not found error --- det-yolov5-tmi/utils/ymir_yolov5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index db6e183..492822f 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -219,7 +219,7 @@ def write_ymir_training_result(cfg: edict, # use `rw.write_training_result` to save training result if weight_file: rw.write_model_stage(stage_name=f"{model}_{epoch}", - files=[weight_file], + files=[osp.basename(weight_file)], mAP=float(map50)) else: # save other files with From 9ebc439c9531fb79cdab48561020ddb89fe9a0dd Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 4 Jul 2022 11:42:35 +0800 Subject: [PATCH 037/204] infer && mining --- det-yolov5-tmi/start.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 7b687e9..12cdcc2 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -22,12 +22,10 @@ def start() -> int: if cfg.ymir.run_training: _run_training(cfg) - elif cfg.ymir.run_mining: + if cfg.ymir.run_mining: _run_mining(cfg) - elif cfg.ymir.run_infer: + if cfg.ymir.run_infer: _run_infer(cfg) - else: - logging.warning('no task running') return 0 From 94cefe7cf41736ce180ce7b04120b89fc421bf27 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 4 Jul 2022 16:51:41 +0800 Subject: [PATCH 038/204] training or mining && infer --- det-yolov5-tmi/start.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 12cdcc2..fba6632 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -22,10 +22,11 @@ def start() -> int: if cfg.ymir.run_training: _run_training(cfg) - if cfg.ymir.run_mining: - _run_mining(cfg) - if cfg.ymir.run_infer: - _run_infer(cfg) + else: + if cfg.ymir.run_mining: + _run_mining(cfg) + if cfg.ymir.run_infer: + _run_infer(cfg) return 0 From 035fb311aea1ae93570f0d20a8d9df643146f2e6 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 4 Jul 2022 17:21:34 +0800 Subject: [PATCH 039/204] write file from /in to /out --- det-yolov4-training/cfg/coco.data | 4 ++-- det-yolov4-training/convert_label_ark2txt.py | 8 ++++---- 2 
files changed, 6 insertions(+), 6 deletions(-) diff --git a/det-yolov4-training/cfg/coco.data b/det-yolov4-training/cfg/coco.data index 95f0887..78903a4 100755 --- a/det-yolov4-training/cfg/coco.data +++ b/det-yolov4-training/cfg/coco.data @@ -1,5 +1,5 @@ classes= 1 -train = /in/train-index-assets.tsv -valid = /in/val-index-assets.tsv +train = /out/train-index-assets.tsv +valid = /out/val-index-assets.tsv names = /out/coco.names backup = /out/models diff --git a/det-yolov4-training/convert_label_ark2txt.py b/det-yolov4-training/convert_label_ark2txt.py index 0304ba9..1043b53 100755 --- a/det-yolov4-training/convert_label_ark2txt.py +++ b/det-yolov4-training/convert_label_ark2txt.py @@ -81,7 +81,7 @@ def _create_image_index_file(src_index_path: str, dst_index_path: str) -> None: if __name__ == "__main__": - _create_image_index_file(src_index_path='/in/train-index.tsv', dst_index_path='/in/train-index-assets.tsv') - _create_image_index_file(src_index_path='/in/val-index.tsv', dst_index_path='/in/val-index-assets.tsv') - _convert_annotations(index_file_path='/in/train-index.tsv', dst_annotations_dir='/in/tmp_labels') - _convert_annotations(index_file_path='/in/val-index.tsv', dst_annotations_dir='/in/tmp_labels') + _create_image_index_file(src_index_path='/in/train-index.tsv', dst_index_path='/out/train-index-assets.tsv') + _create_image_index_file(src_index_path='/in/val-index.tsv', dst_index_path='/out/val-index-assets.tsv') + _convert_annotations(index_file_path='/in/train-index.tsv', dst_annotations_dir='/out/tmp_labels') + _convert_annotations(index_file_path='/in/val-index.tsv', dst_annotations_dir='/out/tmp_labels') From dba2d4c6b5680d2748cc7cfd566a55d1e8d1d2d9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 4 Jul 2022 19:21:07 +0800 Subject: [PATCH 040/204] update docker file and c --- det-yolov4-training/Dockerfile | 2 +- det-yolov4-training/src/utils.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/det-yolov4-training/Dockerfile b/det-yolov4-training/Dockerfile index 1d8ce14..6e6c4c9 100644 --- a/det-yolov4-training/Dockerfile +++ b/det-yolov4-training/Dockerfile @@ -1,7 +1,7 @@ FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple WORKDIR /darknet -RUN apt-get update +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update RUN apt install -y software-properties-common wget RUN add-apt-repository ppa:deadsnakes/ppa RUN apt-get update diff --git a/det-yolov4-training/src/utils.c b/det-yolov4-training/src/utils.c index c7c90aa..10b12cf 100755 --- a/det-yolov4-training/src/utils.c +++ b/det-yolov4-training/src/utils.c @@ -271,7 +271,7 @@ void replace_image_to_label(const char* input_path, char* output_path) find_replace(output_path, "/images/train2014/", "/labels/train2014/", output_path); // COCO find_replace(output_path, "/images/val2014/", "/labels/val2014/", output_path); // COCO - find_replace(output_path, "/in/assets/", "/in/tmp_labels/", output_path); + find_replace(output_path, "/in/assets/", "/out/tmp_labels/", output_path); find_replace(output_path, "\\JPEGImages\\", "\\labels\\", output_path); // PascalVOC //find_replace(output_path, "/images/", "/labels/", output_path); // COCO From 7a5dd796b7597a9ee1be556a4455b79becbb9344 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 5 Jul 2022 19:31:22 +0800 Subject: [PATCH 041/204] update readme --- README.MD | 68 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 53 insertions(+), 15 deletions(-) 
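A note on patches 039 and 040 above: ymir mounts `/in` read-only, so everything the yolov4 executor generates (assets-only index files, converted labels) now lands under the writable `/out` mount. Below is a minimal sketch of the index-splitting helper, assuming each row of a ymir index file is `image_path<TAB>annotation_path`; the exact row format is an assumption, not shown in the diff:

```python
# Hypothetical sketch of _create_image_index_file: keep only the asset column
# of an "image<TAB>annotation" index, writing to /out since /in is read-only.
import os


def create_image_index_file(src_index_path: str, dst_index_path: str) -> None:
    os.makedirs(os.path.dirname(dst_index_path), exist_ok=True)
    with open(src_index_path) as src, open(dst_index_path, 'w') as dst:
        for line in src:
            image_path = line.strip().split('\t')[0]
            dst.write(image_path + '\n')


# mirroring the calls in convert_label_ark2txt.py:
# create_image_index_file('/in/train-index.tsv', '/out/train-index-assets.tsv')
```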
diff --git a/README.MD b/README.MD
index 81e0077..ac81320 100644
--- a/README.MD
+++ b/README.MD
@@ -14,7 +14,7 @@
 - yolov5-FAQ
 
-  - weight download error: download the weights ahead of time and copy them into the image
+  - weight download error: download the weights ahead of time and copy them into the image, or import a pretrained model
 
 ## live-code-executor
 
@@ -31,6 +31,12 @@
 - [ymir-executor authoring guide](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md)
 
+## how to import a pretrained model
+
+- [how to import external models](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md)
+
+  - models can also be imported from the ymir web UI via `Model Management / Model List / Import Model`
+
 ---
 # FAQ
 
@@ -39,29 +45,61 @@
 - use a domestic (CN) mirror, e.g. add the following commands to the dockerfile
 
-  ```
-  RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list
+    ```
+    RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list
 
-  RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple
-  ```
+    RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple
+    ```
 
 - docker build fails: the dockerfile cannot be found, or `COPY/ADD` fails
 
   - go back to the project root (or the root directory the dockerfile expects) so the files and folders referenced by `COPY/ADD` are accessible, taking yolov5 as an example:
 
-  ```
-  cd ymir-executor
+    ```
+    cd ymir-executor
 
-  docker build -t ymir-executor/yolov5 . -f det-yolov5-tmi/cuda111.dockerfile
-  ```
+    docker build -t ymir-executor/yolov5 . -f det-yolov5-tmi/cuda111.dockerfile
+    ```
 
 - files in the `/in` and `/out` directories are cleaned up after the image finishes running
 
-  - to save space, the ymir system deletes unnecessary files there after a task finishes; to keep them, edit `ymir/command/mir/tools/command_run_in_out.py` when deploying ymir and comment out `_cleanup(work_dir=work_dir)`; note that the backend image must be rebuilt
+  - to save space, the ymir system deletes unnecessary files there after a task `finishes successfully`; to keep them, edit `ymir/command/mir/tools/command_run_in_out.py` when deploying ymir and comment out `_cleanup(work_dir=work_dir)`; note that the backend image must be rebuilt
+
+    ```
+    cd ymir
+    docker build -t industryessentials/ymir-backend --build-arg PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple --build-arg SERVER_MODE='dev' -f Dockerfile.backend .
+
+    docker-compose down -v && docker-compose up -d
+    ```
+
+- how to debug a training image
+
+  - first get the task id from the failed task's tensorboard link, e.g. `t000000100000175245d1656933456`
+
+  - go to the ymir deployment directory `ymir-workplace/sandbox/work_dir/TaskTypeTraining/t000000100000175245d1656933456/sub_task/t000000100000175245d1656933456`; `ls` shows the following
+
+    ```
+    # ls
+    in out task_config.yaml
+    ```
+
+  - mount the directories and run the image; note that the ymir deployment directory must be mounted into the container as well
+
+    ```
+    docker run -it --gpus all -v $PWD/in:/in -v $PWD/out:/out -v <ymir-workplace>:<ymir-workplace> <docker-image> bash
+
+    # e.g. with /home/ymir/ymir-workplace as the ymir deployment directory
+    docker run -it --gpus all -v $PWD/in:/in -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace <docker-image> bash
+    ```
+
+  - debugging infer and mining images works the same way; note that the corresponding directories are under `ymir-workplace/sandbox/work_dir/TaskTypeMining`
+
+- how to trade off and improve model accuracy/speed
+
+  - model accuracy depends on dataset size and quality, learning rate, batch size, number of iterations, model architecture, data augmentation, loss function and so on; that is beyond the scope of this FAQ, see:
+
+    - [Object Detection in 20 Years: A Survey](https://arxiv.org/abs/1905.05055)
 
-  ```
-  cd ymir
-  docker build -t industryessentials/ymir-backend --build-arg PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple --build-arg SERVER_MODE='dev' -f Dockerfile.backend .
+    - [Paper with Code: Object Detection](https://paperswithcode.com/task/object-detection)
 
-  docker-compose down -v && docker-compose up -d
-  ```
+    - [awesome object detection](https://github.com/amusi/awesome-object-detection)

From 41fef8c971b8721e57f54be1c01419dcd4a2117e Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Tue, 5 Jul 2022 19:36:08 +0800
Subject: [PATCH 042/204] Revert "fix yolov5 nptyping problem"

This reverts commit bbf824b067f4c63e1f4ed8ad888d7f368c4dde9f.
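A note on the result-writing API this series migrates to: the `write_ymir_training_result` rewrite a few commits above replaces one `rw.write_training_result(...)` call with one `rw.write_model_stage(...)` call per saved checkpoint, so ymir can list intermediate stages. A minimal sketch of the calling pattern, assuming the keyword arguments visible in the diffs (`stage_name`, `files`, `mAP`) are the complete interface:

```python
# Sketch of stage-based result writing, following the calls visible above.
from ymir_exc import result_writer as rw


def save_checkpoint_stage(model: str, epoch: int, map50: float, weight_file: str) -> None:
    # weight_file is a basename relative to the ymir models output directory
    rw.write_model_stage(stage_name=f'{model}_{epoch}',
                         files=[weight_file],
                         mAP=float(map50))


# hypothetical usage after evaluating epoch 10 of a yolov5s run:
# save_checkpoint_stage('yolov5s', 10, 0.42, 'epoch_10.pt')
```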
---
 det-yolov5-tmi/requirements.txt | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/det-yolov5-tmi/requirements.txt b/det-yolov5-tmi/requirements.txt
index 70af2f8..fa1d389 100755
--- a/det-yolov5-tmi/requirements.txt
+++ b/det-yolov5-tmi/requirements.txt
@@ -2,7 +2,7 @@
 # Base ----------------------------------------
 matplotlib>=3.2.2
-numpy>=1.20.0
+numpy>=1.18.5
 opencv-python>=4.1.2
 Pillow>=7.1.2
 PyYAML>=5.3.1
@@ -39,5 +39,4 @@ thop  # FLOPs computation
 # Ymir ---------------------------------------
 imagesize>=1.3.0  # fast obtain image size without load image
 nptyping>=2.1.1  # numpy type hint
-typing-extensions>=4.2.0
-easydict>=1.9
+easydict>=1.9
\ No newline at end of file

From 1a4af5e29b2cb341cdaabc5f26be10e9ecb8aa4c Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Tue, 5 Jul 2022 19:42:39 +0800
Subject: [PATCH 043/204] revert and redo

---
 det-yolov5-tmi/requirements.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/det-yolov5-tmi/requirements.txt b/det-yolov5-tmi/requirements.txt
index fa1d389..70af2f8 100755
--- a/det-yolov5-tmi/requirements.txt
+++ b/det-yolov5-tmi/requirements.txt
@@ -2,7 +2,7 @@
 # Base ----------------------------------------
 matplotlib>=3.2.2
-numpy>=1.18.5
+numpy>=1.20.0
 opencv-python>=4.1.2
 Pillow>=7.1.2
 PyYAML>=5.3.1
@@ -39,4 +39,5 @@ thop  # FLOPs computation
 # Ymir ---------------------------------------
 imagesize>=1.3.0  # fast obtain image size without load image
 nptyping>=2.1.1  # numpy type hint
-easydict>=1.9
\ No newline at end of file
+typing-extensions>=4.2.0
+easydict>=1.9

From 3b3e97886dd0993511fc0e46e7ef6d9ac0025ace Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 6 Jul 2022 11:48:52 +0800
Subject: [PATCH 044/204] update readme

---
 README.MD | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.MD b/README.MD
index ac81320..bcba683 100644
--- a/README.MD
+++ b/README.MD
@@ -14,7 +14,7 @@
 - yolov5-FAQ
 
-  - weight download error: download the weights ahead of time and copy them into the image, or import a pretrained model
+  - weight download error or slow download during in-image training: download the weights ahead of time and copy them into the image's `/app` directory, or import a pretrained model through ymir and load it at training time.
 
 ## live-code-executor

From 7b201d13ee4a2b66d1d2e2a36ca9cbface9ff305 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 6 Jul 2022 15:15:14 +0800
Subject: [PATCH 045/204] add cuda112 dockerfile for yolov4

---
 det-yolov4-mining/cuda112.dockerfile   | 15 +++++++++++++++
 det-yolov4-training/Dockerfile         |  2 +-
 det-yolov4-training/cuda112.dockerfile | 23 +++++++++++++++++++++++
 3 files changed, 39 insertions(+), 1 deletion(-)
 create mode 100644 det-yolov4-mining/cuda112.dockerfile
 create mode 100644 det-yolov4-training/cuda112.dockerfile

diff --git a/det-yolov4-mining/cuda112.dockerfile b/det-yolov4-mining/cuda112.dockerfile
new file mode 100644
index 0000000..871b00f
--- /dev/null
+++ b/det-yolov4-mining/cuda112.dockerfile
@@ -0,0 +1,15 @@
+FROM industryessentials/ymir-executor:cuda112-yolov4-training
+
+RUN apt-get update && apt-get install -y --no-install-recommends libsm6 libxext6 libfontconfig1 libxrender1 libgl1-mesa-glx \
+    && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+RUN pip3 install --upgrade pip setuptools wheel && pip3 install opencv-python pyyaml scipy tqdm && rm -rf /root/.cache/pip3
+
+COPY . 
/app +WORKDIR /app +RUN cp ./start.sh /usr/bin/start.sh && \ + mkdir -p /img-man && \ + cp ./mining-template.yaml /img-man/mining-template.yaml && \ + cp ./infer-template.yaml /img-man/infer-template.yaml && \ + cp ./README.md /img-man/readme.md +CMD sh /usr/bin/start.sh diff --git a/det-yolov4-training/Dockerfile b/det-yolov4-training/Dockerfile index 6e6c4c9..61ce1f6 100644 --- a/det-yolov4-training/Dockerfile +++ b/det-yolov4-training/Dockerfile @@ -1,6 +1,7 @@ FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple WORKDIR /darknet +RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update RUN apt install -y software-properties-common wget RUN add-apt-repository ppa:deadsnakes/ppa @@ -12,7 +13,6 @@ RUN rm /usr/bin/python3 RUN ln -s /usr/bin/python3.7 /usr/bin/python3 RUN python3 get-pip.py RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six -RUN echo '\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic main restricted universe multiverse\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic-security main restricted universe multiverse\n' >> /etc/apt/sources.list ENV DEBIAN_FRONTEND noninteractive RUN apt-get update && apt-get install -y libopencv-dev COPY . /darknet diff --git a/det-yolov4-training/cuda112.dockerfile b/det-yolov4-training/cuda112.dockerfile new file mode 100644 index 0000000..3e6884b --- /dev/null +++ b/det-yolov4-training/cuda112.dockerfile @@ -0,0 +1,23 @@ +FROM nvidia/cuda:11.2.1-cudnn8-devel-ubuntu18.04 +ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple +WORKDIR /darknet +RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update +RUN apt install -y software-properties-common wget +RUN add-apt-repository ppa:deadsnakes/ppa +RUN apt-get update +RUN apt install -y python3.7 python3-distutils +RUN wget https://bootstrap.pypa.io/get-pip.py +RUN wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137 +RUN rm /usr/bin/python3 +RUN ln -s /usr/bin/python3.7 /usr/bin/python3 +RUN python3 get-pip.py +RUN pip3 install -i ${PIP_SOURCE} mxnet-cu112==1.9.1 numpy opencv-python pyyaml watchdog tensorboardX six + +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y libopencv-dev +COPY . 
/darknet +RUN cp /darknet/make_train_test_darknet.sh /usr/bin/start.sh +RUN mkdir /img-man && cp /darknet/training-template.yaml /img-man/training-template.yaml +RUN make -j +CMD bash /usr/bin/start.sh From 4c99d813e360cec9d16cc8a4448ae2b8e9c305a9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 6 Jul 2022 16:45:29 +0800 Subject: [PATCH 046/204] update mmdet for ymir1.2.0 --- det-mmdetection-tmi/docker/Dockerfile | 25 -------- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 46 ++++++++++++++ det-mmdetection-tmi/docker/Dockerfile.cuda111 | 4 +- .../mmdet/core/evaluation/eval_hooks.py | 56 ++++++++++++----- det-mmdetection-tmi/mmdet/datasets/coco.py | 17 +++-- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 63 ++++++++++++++----- det-mmdetection-tmi/start.py | 31 ++++----- det-mmdetection-tmi/training-template.yaml | 1 + det-mmdetection-tmi/ymir_train.py | 11 ++-- 9 files changed, 162 insertions(+), 92 deletions(-) delete mode 100644 det-mmdetection-tmi/docker/Dockerfile create mode 100644 det-mmdetection-tmi/docker/Dockerfile.cuda102 diff --git a/det-mmdetection-tmi/docker/Dockerfile b/det-mmdetection-tmi/docker/Dockerfile deleted file mode 100644 index 5ee7a37..0000000 --- a/det-mmdetection-tmi/docker/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -ARG PYTORCH="1.6.0" -ARG CUDA="10.1" -ARG CUDNN="7" - -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" - -RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install MMCV -RUN pip install --no-cache-dir --upgrade pip wheel setuptools -RUN pip install --no-cache-dir mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html - -# Install MMDetection -RUN conda clean --all -RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection -WORKDIR /mmdetection -ENV FORCE_CUDA="1" -RUN pip install --no-cache-dir -r requirements/build.txt -RUN pip install --no-cache-dir -e . 
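All of the executor images above follow the same layout: hyper-parameter templates are copied to `/img-man`, and the entrypoint reads one merged configuration. A minimal sketch of that convention follows, assuming the `ymir-exc` helpers shown or implied in these diffs: `env.get_current_env()` appears verbatim, while `env.get_executor_config()` for the user parameters is an assumption here:

```python
# Sketch of the merged-config convention: user hyper-parameters (seeded from
# /img-man/*-template.yaml) land under cfg.param, ymir paths and task flags
# under cfg.ymir. env.get_executor_config() is assumed, not shown above.
from easydict import EasyDict as edict
from ymir_exc import env


def get_merged_config() -> edict:
    merged_cfg = edict()
    merged_cfg.param = env.get_executor_config()  # e.g. model, batch_size, epochs
    merged_cfg.ymir = env.get_current_env()       # run_* flags, /in and /out paths
    return merged_cfg


# cfg = get_merged_config()
# if cfg.ymir.run_training: ...
```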
diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 new file mode 100644 index 0000000..6110bf6 --- /dev/null +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -0,0 +1,46 @@ +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +# mmcv>=1.3.17, <=1.5.0 +ARG MMCV="1.4.3" +ARG SERVER_MODE=prod + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" +ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" +ENV LANG=C.UTF-8 + +# Set timezone +RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ + && echo 'Asia/Shanghai' >/etc/timezone + +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ + && apt-get update \ + && apt-get install -y build-essential ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) +RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ + if [ "${SERVER_MODE}" = "dev" ]; then \ + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + else \ + pip install ymir-exc; \ + fi \ + && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html \ + && conda clean --all + +# Install det-mmdetection-tmi +COPY . /app/ +WORKDIR /app +ENV FORCE_CUDA="1" +RUN pip install --no-cache-dir -r requirements/runtime.txt \ + && pip install --no-cache-dir -e . \ + && mkdir /img-man \ + && mv *-template.yaml /img-man \ + && echo "cd /app && python3 start.py" > /usr/bin/start.sh + +CMD bash /usr/bin/start.sh diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index 42a9004..f969e4f 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -32,13 +32,13 @@ RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ && conda clean --all # Install det-mmdetection-tmi -ADD det-mmdetection-tmi /app +COPY . /app/ WORKDIR /app ENV FORCE_CUDA="1" RUN pip install --no-cache-dir -r requirements/runtime.txt \ && pip install --no-cache-dir -e . 
\ && mkdir /img-man \ && mv *-template.yaml /img-man \ - && echo "python3 start.py" > /usr/bin/start.sh + && echo "cd /app && python3 start.py" > /usr/bin/start.sh CMD bash /usr/bin/start.sh diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index 89bc7bb..6e63d43 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -49,15 +49,27 @@ def before_train_epoch(self, runner): def after_train_epoch(self, runner): """Report the training process for ymir""" - percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) - monitor.write_monitor_logger(percent=percent) + if self.by_epoch: + monitor_interval = max(1, runner.max_epochs//1000) + if runner.epoch % monitor_interval == 0: + percent = get_ymir_process( + stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) + monitor.write_monitor_logger(percent=percent) super().after_train_epoch(runner) def before_train_iter(self, runner): self._decide_interval(runner) super().before_train_iter(runner) + def after_train_iter(self, runner): + if not self.by_epoch: + monitor_interval = max(1, runner.max_iters//1000) + if runner.iter % monitor_interval == 0: + percent = get_ymir_process( + stage=YmirStage.TASK, p=runner.iter/runner.max_iters) + monitor.write_monitor_logger(percent=percent) + super().after_train_iter(runner) + def _do_evaluate(self, runner): """perform evaluation and save ckpt.""" if not self._should_evaluate(runner): @@ -67,14 +79,15 @@ def _do_evaluate(self, runner): results = single_gpu_test(runner.model, self.dataloader, show=False) runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) + update_training_result_file(last=False, key_score=key_score) # the key_score may be `None` so it needs to skip the action to save # the best checkpoint if self.save_best and key_score: self._save_ckpt(runner, key_score) - best_score = runner.meta['hook_msgs'].get( - 'best_score', self.init_value_map[self.rule]) - if self.compare_func(key_score, best_score): - update_training_result_file(key_score) + # best_score = runner.meta['hook_msgs'].get( + # 'best_score', self.init_value_map[self.rule]) + # if self.compare_func(key_score, best_score): + # update_training_result_file(key_score) # Note: Considering that MMCV's EvalHook updated its interface in V1.3.16, @@ -104,15 +117,27 @@ def before_train_epoch(self, runner): def after_train_epoch(self, runner): """Report the training process for ymir""" - percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) - monitor.write_monitor_logger(percent=percent) + if self.by_epoch and runner.rank == 0: + monitor_interval = max(1, runner.max_epochs//1000) + if runner.epoch % monitor_interval == 0: + percent = get_ymir_process( + stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) + monitor.write_monitor_logger(percent=percent) super().after_train_epoch(runner) def before_train_iter(self, runner): self._decide_interval(runner) super().before_train_iter(runner) + def after_train_iter(self, runner): + if not self.by_epoch and runner.rank == 0: + monitor_interval = max(1, runner.max_iters//1000) + if runner.iter % monitor_interval == 0: + percent = get_ymir_process( + stage=YmirStage.TASK, p=runner.iter/runner.max_iters) + monitor.write_monitor_logger(percent=percent) + super().after_train_iter(runner) + def _do_evaluate(self, runner): """perform evaluation 
and save ckpt.""" # Synchronization of BatchNorm's buffer (running_mean @@ -145,13 +170,14 @@ def _do_evaluate(self, runner): print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) - + update_training_result_file(last=False, key_score=key_score) # the key_score may be `None` so it needs to skip # the action to save the best checkpoint if self.save_best and key_score: self._save_ckpt(runner, key_score) - best_score = runner.meta['hook_msgs'].get( - 'best_score', self.init_value_map[self.rule]) - if self.compare_func(key_score, best_score): - update_training_result_file(key_score) + # best_score = runner.meta['hook_msgs'].get( + # 'best_score', self.init_value_map[self.rule]) + # if self.compare_func(key_score, best_score): + # update_training_result_file(key_score) + diff --git a/det-mmdetection-tmi/mmdet/datasets/coco.py b/det-mmdetection-tmi/mmdet/datasets/coco.py index cde2de7..ffe83d4 100644 --- a/det-mmdetection-tmi/mmdet/datasets/coco.py +++ b/det-mmdetection-tmi/mmdet/datasets/coco.py @@ -562,15 +562,6 @@ def evaluate(self, results_per_category.append( (f'{nm["name"]}', f'{float(ap):0.3f}')) - - COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') - if COCO_EVAL_TMP_FILE is not None: - mmcv.dump({name:value for name,value in results_per_category}, COCO_EVAL_TMP_FILE, file_format='json') - else: - raise Exception('please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') - - print_log(f'\n write eval result to {COCO_EVAL_TMP_FILE}', logger=logger) - num_columns = min(6, len(results_per_category) * 2) results_flatten = list( itertools.chain(*results_per_category)) @@ -601,4 +592,12 @@ def evaluate(self, f'{ap[4]:.3f} {ap[5]:.3f}') if tmp_dir is not None: tmp_dir.cleanup() + + COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') + if COCO_EVAL_TMP_FILE is not None: + mmcv.dump(eval_results, COCO_EVAL_TMP_FILE, file_format='json') + else: + raise Exception('please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') + + print_log(f'\n write eval result to {COCO_EVAL_TMP_FILE}', logger=logger) return eval_results diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 96aa821..bf2d31c 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -3,6 +3,7 @@ """ import glob import logging +import yaml import os import os.path as osp from enum import IntEnum @@ -85,11 +86,7 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: ann_prefix=ymir_cfg.ymir.input.annotations_dir, classes=ymir_cfg.param.class_names, data_root=ymir_cfg.ymir.input.root_dir, - filter_empty_gt=False, - samples_per_gpu=samples_per_gpu if split == 'train' else max( - 1, samples_per_gpu//2), - workers_per_gpu=workers_per_gpu if split == 'train' else max( - 1, workers_per_gpu//2) + filter_empty_gt=False ) # modify dataset config for `split` mmdet_dataset_cfg = mmdet_cfg.data.get(split, None) @@ -101,7 +98,7 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: x.update(ymir_dataset_cfg) else: src_dataset_type = mmdet_dataset_cfg.type - if src_dataset_type in ['CocoDataset']: + if src_dataset_type in ['CocoDataset', 'YmirDataset']: mmdet_dataset_cfg.update(ymir_dataset_cfg) elif src_dataset_type in ['MultiImageMixDataset', 'RepeatDataset']: mmdet_dataset_cfg.dataset.update(ymir_dataset_cfg) @@ -119,13 +116,17 @@ def modify_mmdet_config(mmdet_cfg: Config, 
ymir_cfg: edict) -> Config: mmdet_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir=ymir_cfg.ymir.output.tensorboard_dir) - mmdet_cfg.log_config['hooks'].append(tensorboard_logger) + if len(mmdet_cfg.log_config['hooks']) <= 1: + mmdet_cfg.log_config['hooks'].append(tensorboard_logger) + else: + mmdet_cfg.log_config['hooks'][1].update(tensorboard_logger) # modify evaluation and interval - interval = max(1, mmdet_cfg.runner.max_epoch//30) + interval = max(1, mmdet_cfg.runner.max_epochs//30) mmdet_cfg.evaluation.interval = interval + mmdet_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') # Whether to evaluating the AP for each class - mmdet_cfg.evaluation.classwise = True + # mmdet_cfg.evaluation.classwise = True return mmdet_cfg @@ -150,21 +151,23 @@ def get_weight_file(cfg: edict) -> str: return max(best_pth_files, key=os.path.getctime) epoch_pth_files = [ - f for f in model_params_path if osp.basename(f).startswith('epoch_')] + f for f in model_params_path if osp.basename(f).startswith(('epoch_', 'iter_'))] if len(epoch_pth_files) > 0: return max(epoch_pth_files, key=os.path.getctime) return "" -def update_training_result_file(key_score): - logging.info(f'key_score is {key_score}') +def update_training_result_file(last=False, key_score=None): + if key_score: + logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') if COCO_EVAL_TMP_FILE is None: raise Exception( 'please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') - results_per_category = mmcv.load(COCO_EVAL_TMP_FILE) + eval_result = mmcv.load(COCO_EVAL_TMP_FILE) + map = eval_result['bbox_mAP_50'] work_dir = os.getenv('YMIR_MODELS_DIR') if work_dir is None or not osp.isdir(work_dir): @@ -172,7 +175,33 @@ def update_training_result_file(key_score): f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {work_dir}') # assert only one model config file in work_dir - result_files = glob.glob(osp.join(work_dir, '*')) - rw.write_training_result(model_names=[osp.basename(f) for f in result_files], - mAP=key_score, - classAPs=results_per_category) + result_files = [osp.basename(f) for f in glob.glob( + osp.join(work_dir, '*')) if osp.basename(f) != 'result.yaml'] + + if last: + # save all output file + rw.write_model_stage(files=result_files, + mAP=float(map), + stage_name='last') + else: + # save newest weight file in format epoch_xxx.pth or iter_xxx.pth + weight_files = [osp.join(work_dir, f) for f in result_files if f.startswith( + ('iter_', 'epoch_')) and f.endswith('.pth')] + + if len(weight_files) > 0: + newest_weight_file = osp.basename( + max(weight_files, key=os.path.getctime)) + + stage_name = osp.splitext(newest_weight_file)[0] + training_result_file = osp.join(work_dir, 'result.yaml') + if osp.exists(training_result_file): + with open(training_result_file, 'r') as f: + training_result = yaml.safe_load(f) + model_stages = training_result.get('model_stages', {}) + else: + model_stages = {} + + if stage_name not in model_stages: + rw.write_model_stage(files=[newest_weight_file], + mAP=float(map), + stage_name=stage_name) diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 44babba..9fd6a7c 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -16,17 +16,16 @@ from ymir_infer import YmirModel, mmdet_result_to_ymir -def start() -> int: - cfg = get_merged_config() - +def start(cfg: edict) -> int: 
logging.info(f'merged config: {cfg}') if cfg.ymir.run_training: _run_training(cfg) - elif cfg.ymir.run_mining: - _run_mining(cfg) - elif cfg.ymir.run_infer: - _run_infer(cfg) + elif cfg.ymir.run_mining or cfg.ymir.run_infer: + if cfg.ymir.run_mining: + _run_mining(cfg) + if cfg.ymir.run_infer: + _run_infer(cfg) else: logging.warning('no task running') @@ -44,18 +43,6 @@ def _run_training(cfg: edict) -> None: logging.info(f'start training: {command}') subprocess.run(command.split(), check=True) - work_dir = cfg.ymir.output.models_dir - result_files = glob.glob(os.path.join(work_dir, '*')) - - training_result_file = cfg.ymir.output.training_result_file - with open(training_result_file, 'r') as fp: - best_result = yaml.safe_load(fp) - - # save the last checkpoint - rw.write_training_result(model_names=[os.path.basename(f) for f in result_files], - mAP=best_result['map'], - classAPs=best_result['class_aps']) - # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) @@ -95,5 +82,9 @@ def _run_infer(cfg: edict) -> None: datefmt='%Y%m%d-%H:%M:%S', level=logging.INFO) + cfg = get_merged_config() + os.environ.setdefault('YMIR_MODELS_DIR', cfg.ymir.output.models_dir) + os.environ.setdefault('COCO_EVAL_TMP_FILE', os.path.join( + cfg.ymir.output.root_dir, 'eval_tmp.json')) os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') - sys.exit(start()) + sys.exit(start(cfg)) diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index 8be36b9..9b97e21 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -4,4 +4,5 @@ max_epochs: 300 config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' args_options: '' cfg_options: '' +metric: 'bbox_mAP_50' port: 12345 diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index a39e64b..e3f3537 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -8,7 +8,7 @@ from ymir_exc import monitor from mmdet.utils.util_ymir import (YmirStage, get_merged_config, - get_ymir_process) + get_ymir_process, update_training_result_file) def main(cfg: edict) -> int: @@ -35,15 +35,15 @@ def main(cfg: edict) -> int: if num_gpus == 0: # view https://mmdetection.readthedocs.io/en/stable/1_exist_data_model.html#training-on-cpu os.environ.setdefault('CUDA_VISIBLE_DEVICES', "-1") - cmd = f"python tools/train.py {config_file} " + \ + cmd = f"python3 tools/train.py {config_file} " + \ f"--work-dir {work_dir}" elif num_gpus == 1: - cmd = f"python tools/train.py {config_file} " + \ + cmd = f"python3 tools/train.py {config_file} " + \ f"--work-dir {work_dir} --gpu-id {gpu_id}" else: os.environ.setdefault('CUDA_VISIBLE_DEVICES', gpu_id) port = cfg.param.get('port') - os.environ.setdefault('PORT', port) + os.environ.setdefault('PORT', str(port)) cmd = f"./tools/dist_train.sh {config_file} {num_gpus} " + \ f"--work-dir {work_dir}" @@ -55,6 +55,9 @@ def main(cfg: edict) -> int: logging.info(f"training command: {cmd}") subprocess.run(cmd.split(), check=True) + + # save the last checkpoint + update_training_result_file(last=True) logging.info(f"training finished") return 0 From 2cbb95e00a8a5480b9d8ca088be7c00db53cc484 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 10:36:41 +0800 Subject: [PATCH 047/204] update training --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 5 +++-- det-mmdetection-tmi/training-template.yaml | 2 +- det-mmdetection-tmi/ymir_infer.py | 20 +++++++++++++++++++- 
det-mmdetection-tmi/ymir_train.py | 2 +- 4 files changed, 24 insertions(+), 5 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index bf2d31c..d3c6e97 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -136,7 +136,7 @@ def get_weight_file(cfg: edict) -> str: find weight file in cfg.param.model_params_path or cfg.param.model_params_path """ if cfg.ymir.run_training: - model_params_path: List = cfg.param.pretrained_model_paths + model_params_path: List = cfg.param.pretrained_model_params else: model_params_path: List = cfg.param.model_params_path @@ -167,7 +167,8 @@ def update_training_result_file(last=False, key_score=None): 'please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') eval_result = mmcv.load(COCO_EVAL_TMP_FILE) - map = eval_result['bbox_mAP_50'] + # eval_result may be empty dict {}. + map = eval_result.get('bbox_mAP_50',0) work_dir = os.getenv('YMIR_MODELS_DIR') if work_dir is None or not osp.isdir(work_dir): diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index 9b97e21..a56133d 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -4,5 +4,5 @@ max_epochs: 300 config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' args_options: '' cfg_options: '' -metric: 'bbox_mAP_50' +metric: 'bbox' port: 12345 diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 47e47ed..7fc8892 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -1,4 +1,5 @@ import argparse +import os.path as osp from typing import Any, List import numpy as np @@ -49,13 +50,27 @@ def mmdet_result_to_ymir(results: List[DETECTION_RESULT], ann_list.append(ann) return ann_list +def get_config_file(cfg): + if cfg.ymir.run_training: + model_params_path: List = cfg.param.pretrained_model_params + else: + model_params_path: List = cfg.param.model_params_path + + model_dir = cfg.ymir.input.models_dir + config_files = [ + osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.py'))] + + if len(config_files) > 0: + return config_files[0] + else: + return None class YmirModel: def __init__(self, cfg: edict): self.cfg = cfg # Specify the path to model config and checkpoint file - config_file = cfg.param.config_file + config_file = get_config_file(cfg) checkpoint_file = get_weight_file(cfg) cfg_options = parse_option( cfg.param.cfg_options) if cfg.param.cfg_options else None @@ -69,3 +84,6 @@ def __init__(self, cfg: edict): def infer(self, img): return inference_detector(self.model, img) + + def mining(self): + pass diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index e3f3537..3a3b3de 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -44,7 +44,7 @@ def main(cfg: edict) -> int: os.environ.setdefault('CUDA_VISIBLE_DEVICES', gpu_id) port = cfg.param.get('port') os.environ.setdefault('PORT', str(port)) - cmd = f"./tools/dist_train.sh {config_file} {num_gpus} " + \ + cmd = f"bash ./tools/dist_train.sh {config_file} {num_gpus} " + \ f"--work-dir {work_dir}" if args_options: From 9bdc59f41538c7a7fa3790961ecd0a64599514eb Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 11:34:40 +0800 Subject: [PATCH 048/204] fix py3.7 and ymir_exc problem --- 
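The eval-hook changes a couple of commits above stop writing a ymir progress record on every epoch or iteration and instead cap updates at roughly 1000 per run. The pattern, reduced to a sketch; the `0.1/0.8` stage window is an assumed weighting standing in for `get_ymir_process(stage=YmirStage.TASK, ...)`:

```python
# Sketch of the throttled progress reporting used by the ymir eval hooks above.
from ymir_exc import monitor

TASK_START, TASK_WEIGHT = 0.1, 0.8  # assumed pre-task/task stage weighting


def report_train_progress(epoch: int, max_epochs: int) -> None:
    monitor_interval = max(1, max_epochs // 1000)  # at most ~1000 writes per run
    if epoch % monitor_interval == 0:
        percent = TASK_START + TASK_WEIGHT * epoch / max_epochs
        monitor.write_monitor_logger(percent=percent)
```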
det-mmdetection-tmi/docker/Dockerfile.cuda102 | 8 ++++---- det-mmdetection-tmi/docker/Dockerfile.cuda111 | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index 6110bf6..653b03a 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -12,7 +12,8 @@ ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" ENV LANG=C.UTF-8 - +ENV FORCE_CUDA="1" +ENV PYTHONPATH=. # Set timezone RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo 'Asia/Shanghai' >/etc/timezone @@ -24,9 +25,9 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ && rm -rf /var/lib/apt/lists/* # Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) -RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ +RUN pip install --no-cache-dir --upgrade pip wheel setuptools pydantic tensorboardX pyyaml && \ if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install --ignore-requires-python --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi \ @@ -36,7 +37,6 @@ RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ # Install det-mmdetection-tmi COPY . /app/ WORKDIR /app -ENV FORCE_CUDA="1" RUN pip install --no-cache-dir -r requirements/runtime.txt \ && pip install --no-cache-dir -e . \ && mkdir /img-man \ diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index f969e4f..d0c24c6 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -11,7 +11,8 @@ ARG SERVER_MODE=prod ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" - +ENV FORCE_CUDA="1" +ENV PYTHONPATH=. # Set timezone RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo 'Asia/Shanghai' >/etc/timezone @@ -34,7 +35,6 @@ RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ # Install det-mmdetection-tmi COPY . /app/ WORKDIR /app -ENV FORCE_CUDA="1" RUN pip install --no-cache-dir -r requirements/runtime.txt \ && pip install --no-cache-dir -e . 
\ && mkdir /img-man \ From 5d14372d3697609928df560715472499edc26090 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 15:38:52 +0800 Subject: [PATCH 049/204] add mining and infer --- det-mmdetection-tmi/start.py | 60 ++--- det-mmdetection-tmi/ymir_infer.py | 53 ++++- det-mmdetection-tmi/ymir_log.py | 53 ----- det-mmdetection-tmi/ymir_mining.py | 357 +++++++++++++++++++++++++++++ det-mmdetection-tmi/ymir_train.py | 1 - 5 files changed, 420 insertions(+), 104 deletions(-) delete mode 100644 det-mmdetection-tmi/ymir_log.py create mode 100644 det-mmdetection-tmi/ymir_mining.py diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 9fd6a7c..686d451 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -1,80 +1,54 @@ -import glob import logging import os import subprocess import sys -import cv2 -import yaml from easydict import EasyDict as edict -from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor -from ymir_exc import result_writer as rw -from mmdet.utils.util_ymir import (YmirStage, get_merged_config, - get_ymir_process) -from ymir_infer import YmirModel, mmdet_result_to_ymir +from mmdet.utils.util_ymir import get_merged_config +from ymir_exc import monitor def start(cfg: edict) -> int: logging.info(f'merged config: {cfg}') if cfg.ymir.run_training: - _run_training(cfg) + _run_training() elif cfg.ymir.run_mining or cfg.ymir.run_infer: if cfg.ymir.run_mining: - _run_mining(cfg) + _run_mining() if cfg.ymir.run_infer: - _run_infer(cfg) + _run_infer() else: logging.warning('no task running') return 0 -def _run_training(cfg: edict) -> None: - """ - function for training task - 1. convert dataset - 2. training model - 3. save model weight/hyperparameter/... to design directory - """ +def _run_training() -> None: command = 'python3 ymir_train.py' logging.info(f'start training: {command}') subprocess.run(command.split(), check=True) # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) + logging.info(f"training finished") - -def _run_mining(cfg: edict()) -> None: - command = 'python3 mining/mining_cald.py' - logging.info(f'mining: {command}') +def _run_mining() -> None: + command = 'python3 ymir_mining.py' + logging.info(f'start mining: {command}') subprocess.run(command.split(), check=True) - monitor.write_monitor_logger(percent=1.0) - - -def _run_infer(cfg: edict) -> None: - N = dr.items_count(env.DatasetType.CANDIDATE) - infer_result = dict() - model = YmirModel(cfg) - idx = -1 - # write infer result - monitor_gap = max(1, N // 100) - for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): - img = cv2.imread(asset_path) - result = model.infer(img) - infer_result[asset_path] = mmdet_result_to_ymir(result, cfg.param.class_names) - idx += 1 + monitor.write_monitor_logger(percent=1.0) + logging.info(f"mining finished") - if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) - monitor.write_monitor_logger(percent=percent) +def _run_infer() -> None: + command = 'python3 ymir_infer.py' + logging.info(f'start infer: {command}') + subprocess.run(command.split(), check=True) - rw.write_infer_result(infer_result=infer_result) monitor.write_monitor_logger(percent=1.0) - + logging.info(f"infer finished") if __name__ == '__main__': logging.basicConfig(stream=sys.stdout, diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 7fc8892..84df374 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ 
b/det-mmdetection-tmi/ymir_infer.py @@ -1,15 +1,22 @@ import argparse import os.path as osp +import sys +import warnings from typing import Any, List +import cv2 import numpy as np from easydict import EasyDict as edict from mmcv import DictAction from nptyping import NDArray, Shape -from ymir_exc import result_writer as rw +from tqdm import tqdm from mmdet.apis import inference_detector, init_detector -from mmdet.utils.util_ymir import get_weight_file +from mmdet.utils.util_ymir import (YmirStage, get_merged_config, + get_weight_file, get_ymir_process) +from ymir_exc import dataset_reader as dr +from ymir_exc import env, monitor +from ymir_exc import result_writer as rw DETECTION_RESULT = NDArray[Shape['*,5'], Any] @@ -50,6 +57,7 @@ def mmdet_result_to_ymir(results: List[DETECTION_RESULT], ann_list.append(ann) return ann_list + def get_config_file(cfg): if cfg.ymir.run_training: model_params_path: List = cfg.param.pretrained_model_params @@ -61,9 +69,13 @@ def get_config_file(cfg): osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.py'))] if len(config_files) > 0: + if len(config_files) > 1: + warnings.warn(f'multiple config file found! use {config_files[0]}') return config_files[0] else: - return None + raise Exception( + f'no config_file found in {model_dir} and {model_params_path}') + class YmirModel: def __init__(self, cfg: edict): @@ -72,8 +84,8 @@ def __init__(self, cfg: edict): # Specify the path to model config and checkpoint file config_file = get_config_file(cfg) checkpoint_file = get_weight_file(cfg) - cfg_options = parse_option( - cfg.param.cfg_options) if cfg.param.cfg_options else None + options = cfg.param.get('cfg_options', None) + cfg_options = parse_option(options) if options else None # current infer can only use one gpu!!! 
gpu_ids = cfg.param.gpu_id @@ -85,5 +97,32 @@ def __init__(self, cfg: edict): def infer(self, img): return inference_detector(self.model, img) - def mining(self): - pass + +def main(): + cfg = get_merged_config() + + N = dr.items_count(env.DatasetType.CANDIDATE) + infer_result = dict() + model = YmirModel(cfg) + idx = -1 + + # write infer result + monitor_gap = max(1, N // 100) + for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): + img = cv2.imread(asset_path) + result = model.infer(img) + infer_result[asset_path] = mmdet_result_to_ymir( + result, cfg.param.class_names) + idx += 1 + + if idx % monitor_gap == 0: + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) + monitor.write_monitor_logger(percent=percent) + + rw.write_infer_result(infer_result=infer_result) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/det-mmdetection-tmi/ymir_log.py b/det-mmdetection-tmi/ymir_log.py deleted file mode 100644 index 29f2ec8..0000000 --- a/det-mmdetection-tmi/ymir_log.py +++ /dev/null @@ -1,53 +0,0 @@ -import time -import os.path as osp -from typing import Generator -from pygtail import Pygtail -from mmcv.util import TORCH_VERSION, digit_version - -if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.1')): - try: - from tensorboardX import SummaryWriter - except ImportError: - raise ImportError('Please install tensorboardX to use ' - 'TensorboardLoggerHook.') -else: - try: - from torch.utils.tensorboard import SummaryWriter - except ImportError: - raise ImportError( - 'Please run "pip install future tensorboard" to install ' - 'the dependencies to use torch.utils.tensorboard ' - '(applicable to PyTorch 1.1 or higher)') - - -def read_log(f: str, wait: bool = True, sleep: float = 0.1) -> Generator[str]: - """ - Basically tail -f with a configurable sleep - """ - with open(f) as logfile: - # logfile.seek(0, os.SEEK_END) - while True: - new_line = logfile.readline() - if new_line: - yield new_line - else: - if wait: - # wait for new line - time.sleep(sleep) - else: - # read all line in file - break - -def write_tensorboard_text(tb_log_file: str, executor_log_file: str) -> None: - global _TENSORBOARD_GLOBAL_STEP - # tb_log_file = osp.join(cfg.ymir.output.tensorboard_dir, 'tensorboard_text.log') - # executor_log_file = cfg.ymir.output.executor_log_file - writer = SummaryWriter(tb_log_file) - - # Pygtail always return the new lines - for line in Pygtail(executor_log_file): - writer.add_text(tag='ymir-executor', text_string=line, global_step=_TENSORBOARD_GLOBAL_STEP) - _TENSORBOARD_GLOBAL_STEP += 1 - - writer.close() \ No newline at end of file diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining.py new file mode 100644 index 0000000..16379df --- /dev/null +++ b/det-mmdetection-tmi/ymir_mining.py @@ -0,0 +1,357 @@ +""" +data augmentations for CALD method, including horizontal_flip, rotate(5'), cutout +official code: https://github.com/we1pingyu/CALD/blob/master/cald/cald_helper.py +""" +import random +import sys +from typing import Any, Dict, List, Tuple + +import cv2 +import numpy as np +from nptyping import NDArray +from scipy.stats import entropy +from tqdm import tqdm + +from mmdet.utils.util_ymir import (BBOX, CV_IMAGE, YmirStage, + get_merged_config, get_ymir_process) +from ymir_exc import dataset_reader as dr +from ymir_exc import env, monitor +from ymir_exc import result_writer as rw +from ymir_infer import YmirModel + + +def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray: 
+ ''' + Find intersection of every box combination between two sets of box + boxes1: bounding boxes 1, a tensor of dimensions (n1, 4) + boxes2: bounding boxes 2, a tensor of dimensions (n2, 4) + + Out: Intersection each of boxes1 with respect to each of boxes2, + a tensor of dimensions (n1, n2) + ''' + n1 = boxes1.shape[0] + n2 = boxes2.shape[0] + max_xy = np.minimum(np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1), + np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0)) + + min_xy = np.maximum(np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1), + np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0)) + inter = np.clip(max_xy - min_xy, a_min=0, a_max=None) # (n1, n2, 2) + return inter[:, :, 0] * inter[:, :, 1] # (n1, n2) + + +def horizontal_flip(image: CV_IMAGE, bbox: BBOX) \ + -> Tuple[CV_IMAGE, BBOX]: + """ + image: opencv image, [height,width,channels] + bbox: numpy.ndarray, [N,4] --> [x1,y1,x2,y2] + """ + image = image.copy() + + width = image.shape[1] + # Flip image horizontally + image = image[:, ::-1, :] + if len(bbox) > 0: + bbox = bbox.copy() + # Flip bbox horizontally + bbox[:, [0, 2]] = width - bbox[:, [2, 0]] + return image, bbox + + +def cutout(image: CV_IMAGE, bbox: BBOX, cut_num: int = 2, fill_val: int = 0, + bbox_remove_thres: float = 0.4, bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]: + ''' + Cutout augmentation + image: A PIL image + boxes: bounding boxes, a tensor of dimensions (#objects, 4) + labels: labels of object, a tensor of dimensions (#objects) + fill_val: Value filled in cut out + bbox_remove_thres: Theshold to remove bbox cut by cutout + + Out: new image, new_boxes, new_labels + ''' + image = image.copy() + bbox = bbox.copy() + + if len(bbox) == 0: + return image, bbox + + original_h, original_w, original_channel = image.shape + count = 0 + for _ in range(50): + # Random cutout size: [0.15, 0.5] of original dimension + cutout_size_h = random.uniform(0.05 * original_h, 0.2 * original_h) + cutout_size_w = random.uniform(0.05 * original_w, 0.2 * original_w) + + # Random position for cutout + left = random.uniform(0, original_w - cutout_size_w) + right = left + cutout_size_w + top = random.uniform(0, original_h - cutout_size_h) + bottom = top + cutout_size_h + cutout = np.array( + [[float(left), float(top), float(right), float(bottom)]]) + + # Calculate intersect between cutout and bounding boxes + overlap_size = intersect(cutout, bbox) + area_boxes = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1]) + ratio = overlap_size / (area_boxes + 1e-14) + # If all boxes have Iou greater than bbox_remove_thres, try again + if ratio.max() > bbox_remove_thres or ratio.max() < bbox_min_thres: + continue + + image[int(top):int(bottom), int(left):int(right), :] = fill_val + count += 1 + if count >= cut_num: + break + return image, bbox + + +def rotate(image: CV_IMAGE, bbox: BBOX, rot: float = 5) -> Tuple[CV_IMAGE, BBOX]: + image = image.copy() + bbox = bbox.copy() + h, w, c = image.shape + center = np.array([w / 2.0, h / 2.0]) + s = max(h, w) * 1.0 + trans = get_affine_transform(center, s, rot, [w, h]) + if len(bbox) > 0: + for i in range(bbox.shape[0]): + x1, y1 = affine_transform(bbox[i, :2], trans) + x2, y2 = affine_transform(bbox[i, 2:], trans) + x3, y3 = affine_transform(bbox[i, [2, 1]], trans) + x4, y4 = affine_transform(bbox[i, [0, 3]], trans) + bbox[i, :2] = [min(x1, x2, x3, x4), min(y1, y2, y3, y4)] + bbox[i, 2:] = [max(x1, x2, x3, x4), max(y1, y2, y3, y4)] + image = cv2.warpAffine(image, trans, (w, h), flags=cv2.INTER_LINEAR) + return 
image, bbox + + +def get_3rd_point(a: NDArray, b: NDArray) -> NDArray: + direct = a - b + return b + np.array([-direct[1], direct[0]], dtype=np.float32) + + +def get_dir(src_point: NDArray, rot_rad: float) -> List: + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + + src_result = [0, 0] + src_result[0] = src_point[0] * cs - src_point[1] * sn + src_result[1] = src_point[0] * sn + src_point[1] * cs + + return src_result + + +def transform_preds(coords: NDArray, center: NDArray, scale: Any, rot: float, output_size: List) -> NDArray: + trans = get_affine_transform(center, scale, rot, output_size, inv=True) + target_coords = affine_transform(coords, trans) + return target_coords + + +def get_affine_transform(center: NDArray, + scale: Any, + rot: float, + output_size: List, + shift: NDArray = np.array([0, 0], dtype=np.float32), + inv: bool = False) -> NDArray: + if not isinstance(scale, np.ndarray) and not isinstance(scale, list): + scale = np.array([scale, scale], dtype=np.float32) + + scale_tmp = scale + src_w = scale_tmp[0] + dst_w = output_size[0] + dst_h = output_size[1] + + rot_rad = np.pi * rot / 180 + src_dir = get_dir([0, src_w * -0.5], rot_rad) + dst_dir = np.array([0, dst_w * -0.5], np.float32) + + src = np.zeros((3, 2), dtype=np.float32) + dst = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale_tmp * shift + src[1, :] = center + src_dir + scale_tmp * shift + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir + + src[2:, :] = get_3rd_point(src[0, :], src[1, :]) + dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return trans + + +def affine_transform(pt: NDArray, t: NDArray) -> NDArray: + new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T + new_pt = np.dot(t, new_pt) + return new_pt[:2] + + +def resize(img: CV_IMAGE, boxes: BBOX, ratio: float = 0.8) -> Tuple[CV_IMAGE, BBOX]: + """ + ratio: <= 1.0 + """ + assert ratio <= 1.0, f'resize ratio {ratio} must <= 1.0' + + h, w, _ = img.shape + ow = int(w * ratio) + oh = int(h * ratio) + resize_img = cv2.resize(img, (ow, oh)) + new_img = np.zeros_like(img) + new_img[:oh, :ow] = resize_img + + if len(boxes) == 0: + return new_img, boxes + else: + return new_img, boxes * ratio + + +def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray: + """ + args: + boxes1: np.array, (N, 4), xyxy + boxes2: np.array, (M, 4), xyxy + return: + iou: np.array, (N, M) + """ + area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) + area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) + iner_area = intersect(boxes1, boxes2) + area1 = area1.reshape(-1, 1).repeat(area2.shape[0], axis=1) + area2 = area2.reshape(1, -1).repeat(area1.shape[0], axis=0) + iou = iner_area / (area1 + area2 - iner_area + 1e-14) + return iou + + +def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: + if len(result) > 0: + bboxes = result[:, :4].astype(np.int32) + conf = result[:, 4] + class_id = result[:, 5] + else: + bboxes = np.zeros(shape=(0, 4), dtype=np.int32) + conf = np.zeros(shape=(0, 1), dtype=np.float32) + class_id = np.zeros(shape=(0, 1), dtype=np.int32) + + return bboxes, conf, class_id + + +class YmirMining(YmirModel): + def mining(self): + N = dr.items_count(env.DatasetType.CANDIDATE) + monitor_gap = max(1, N // 100) + idx = -1 + beta = 1.3 + mining_result = [] + for asset_path, _ in 
tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)):
+ img = cv2.imread(asset_path)
+ # xyxy,conf,cls
+ result = self.predict(img)
+ bboxes, conf, _ = split_result(result)
+ if len(result) == 0:
+ # no result for the image without augmentation
+ mining_result.append((asset_path, -beta))
+ continue
+
+ consistency = 0.0
+ aug_bboxes_dict, aug_results_dict = self.aug_predict(img, bboxes)
+ for key in aug_results_dict:
+ # no result for the image under this augmentation
+ if len(aug_results_dict[key]) == 0:
+ consistency += beta
+ continue
+
+ bboxes_key, conf_key, _ = split_result(aug_results_dict[key])
+ cls_scores_aug = 1 - conf_key
+ cls_scores = 1 - conf
+
+ consistency_per_aug = 2.0
+ ious = get_ious(bboxes_key, aug_bboxes_dict[key])
+ aug_idxs = np.argmax(ious, axis=0)
+ for origin_idx, aug_idx in enumerate(aug_idxs):
+ max_iou = ious[aug_idx, origin_idx]
+ if max_iou == 0:
+ consistency_per_aug = min(consistency_per_aug, beta)
+ p = cls_scores_aug[aug_idx]
+ q = cls_scores[origin_idx]
+ m = (p + q) / 2.
+ js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m)
+ if js < 0:
+ js = 0
+ consistency_box = max_iou
+ consistency_cls = 0.5 * \
+ (conf[origin_idx] + conf_key[aug_idx]) * (1 - js)
+ consistency_per_inst = abs(
+ consistency_box + consistency_cls - beta)
+ consistency_per_aug = min(
+ consistency_per_aug, consistency_per_inst.item())
+
+ consistency += consistency_per_aug
+
+ consistency /= len(aug_results_dict)
+
+ mining_result.append((asset_path, consistency))
+ idx += 1
+
+ if idx % monitor_gap == 0:
+ percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N)
+ monitor.write_monitor_logger(percent=percent)
+
+ return mining_result
+
+ def predict(self, img: CV_IMAGE) -> NDArray:
+ """
+ predict single image and return bbox information
+ img: opencv BGR, uint8 format
+ """
+ results = self.infer(img)
+
+ xyxy_conf_idx_list=[]
+ for idx, result in enumerate(results):
+ for line in result:
+ if any(np.isinf(line)):
+ continue
+ x1, y1, x2, y2, score = line
+ xyxy_conf_idx_list.append([x1, y1, x2, y2, score, idx])
+
+ if len(xyxy_conf_idx_list) == 0:
+ return np.zeros(shape=(0, 6), dtype=np.float32)
+ else:
+ return np.array(xyxy_conf_idx_list, dtype=np.float32)
+
+ def aug_predict(self, image: CV_IMAGE, bboxes: BBOX) -> Tuple[Dict[str, BBOX], Dict[str, NDArray]]:
+ """
+ Apply each augmentation method (flip, cutout, rotate and resize)
+ to the image and bboxes, then run the model on the augmented image.
+
+ Return the augmented bboxes and the prediction results.
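+
+ note: both returned dicts are keyed by the augmentation name ('flip',
+ 'cutout', 'rotate', 'resize'); aug_bboxes[key] holds the input bboxes
+ transformed by that augmentation, and aug_results[key] holds the raw
+ predict() output on the augmented image, so mining() can match the two
+ sets of boxes by IoU.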
+ """ + aug_dict = dict(flip=horizontal_flip, + cutout=cutout, + rotate=rotate, + resize=resize) + + aug_bboxes = dict() + aug_results = dict() + for key in aug_dict: + aug_img, aug_bbox = aug_dict[key](image, bboxes) + + aug_result = self.predict(aug_img) + aug_bboxes[key] = aug_bbox + aug_results[key] = aug_result + + return aug_bboxes, aug_results + + +def main(): + cfg = get_merged_config() + miner = YmirMining(cfg) + mining_result = miner.mining() + rw.write_mining_result(mining_result=mining_result) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 3a3b3de..ace0c27 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -58,7 +58,6 @@ def main(cfg: edict) -> int: # save the last checkpoint update_training_result_file(last=True) - logging.info(f"training finished") return 0 From 8cbf2e9332407f3428899de74645c1460f2d52b7 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 15:40:03 +0800 Subject: [PATCH 050/204] update dockerfile --- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index 653b03a..841fe7b 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -25,9 +25,9 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ && rm -rf /var/lib/apt/lists/* # Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) -RUN pip install --no-cache-dir --upgrade pip wheel setuptools pydantic tensorboardX pyyaml && \ +RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --ignore-requires-python --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi \ From 0daa2cd591db41c06549d955adae7d6db57dbd5d Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 16:01:44 +0800 Subject: [PATCH 051/204] empty mining template --- det-mmdetection-tmi/README_ymir.md | 15 +++++++++++++-- det-mmdetection-tmi/infer-template.yaml | 12 ++++++------ det-mmdetection-tmi/mining-template.yaml | 7 +++++++ 3 files changed, 26 insertions(+), 8 deletions(-) create mode 100644 det-mmdetection-tmi/mining-template.yaml diff --git a/det-mmdetection-tmi/README_ymir.md b/det-mmdetection-tmi/README_ymir.md index 90a84b0..194bd03 100644 --- a/det-mmdetection-tmi/README_ymir.md +++ b/det-mmdetection-tmi/README_ymir.md @@ -2,11 +2,22 @@ `mmdetection` framework for object `det`ection `t`raining/`m`ining/`i`nfer task +# build docker image + +``` +docker build -t ymir-executor/mmdet:cuda102-tmi -build-arg SERVER_MODE=dev -f docker/Dockerfile.cuda102 . + +docker build -t ymir-executor/mmdet:cuda111-tmi -build-arg SERVER_MODE=dev -f docker/Dockerfile.cuda111 . 
+``` + # changelog - modify `mmdet/datasets/coco.py`, save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` with json format - modify `mmdet/core/evaluation/eval_hooks.py`, write training result file and monitor task process - modify `mmdet/datasets/__init__.py` and add `mmdet/datasets/ymir.py`, add class `YmirDataset` to load YMIR dataset. -- modify `mmdet/apis/train.py`, set `eval_cfg['classwise'] = True` for class-wise evaluation - add `mmdet/utils/util_ymir.py` for ymir training/infer/mining -- add `ymir_infer.py` for infer and mining +- add `ymir_infer.py` for infer +- add `ymir_mining.py` for mining - add `ymir_train.py` modify `tools/train.py` to update the mmcv config for training +- add `start.py`, the entrypoint for docker image + + diff --git a/det-mmdetection-tmi/infer-template.yaml b/det-mmdetection-tmi/infer-template.yaml index 8be36b9..7dd411c 100644 --- a/det-mmdetection-tmi/infer-template.yaml +++ b/det-mmdetection-tmi/infer-template.yaml @@ -1,7 +1,7 @@ -samples_per_gpu: 2 -workers_per_gpu: 2 -max_epochs: 300 -config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' -args_options: '' +# samples_per_gpu: 2 +# workers_per_gpu: 2 +# max_epochs: 300 +# config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' +# args_options: '' cfg_options: '' -port: 12345 +# port: 12345 diff --git a/det-mmdetection-tmi/mining-template.yaml b/det-mmdetection-tmi/mining-template.yaml new file mode 100644 index 0000000..7dd411c --- /dev/null +++ b/det-mmdetection-tmi/mining-template.yaml @@ -0,0 +1,7 @@ +# samples_per_gpu: 2 +# workers_per_gpu: 2 +# max_epochs: 300 +# config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' +# args_options: '' +cfg_options: '' +# port: 12345 From 9b346f25f793dc73db2059d3c0dd561b78d72fa3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 16:25:53 +0800 Subject: [PATCH 052/204] model_path --> model_params --- live-code-executor/img-man/training-template.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/live-code-executor/img-man/training-template.yaml b/live-code-executor/img-man/training-template.yaml index 79b1356..865b40b 100644 --- a/live-code-executor/img-man/training-template.yaml +++ b/live-code-executor/img-man/training-template.yaml @@ -4,5 +4,5 @@ gpu_id: '0' task_id: 'default-training-task' -pretrained_model_paths: [] +pretrained_model_params: [] class_names: [] From 84eb49fddcf3bf1e37bd37125140f52cdd076391 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 18:08:15 +0800 Subject: [PATCH 053/204] add opencv --- live-code-executor/mxnet.dockerfile | 2 +- live-code-executor/torch.dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index 1ff0a66..6a09472 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -29,7 +29,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && # Install python package # view https://mxnet.apache.org/versions/1.9.1/get_started for detail -RUN pip3 install mxnet-cu112==${MXNET} loguru +RUN pip3 install mxnet-cu112==${MXNET} loguru opencv # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index 66de371..4b7d735 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y git curl wget zip gcc \ # Install python 
package
 RUN pip install -U pip && \
- pip install loguru
+ pip install loguru opencv
 
 # install ymir-exc sdk
 RUN if [ "${SERVER_MODE}" = "dev" ]; then \
From 61554ae9d337fee97605d32e3c05c16cff4ea007 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 7 Jul 2022 18:12:46 +0800
Subject: [PATCH 054/204] add opencv

---
 live-code-executor/mxnet.dockerfile | 2 +-
 live-code-executor/torch.dockerfile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile
index 6a09472..58601dc 100644
--- a/live-code-executor/mxnet.dockerfile
+++ b/live-code-executor/mxnet.dockerfile
@@ -29,7 +29,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC &&
 
 # Install python package
 # view https://mxnet.apache.org/versions/1.9.1/get_started for detail
-RUN pip3 install mxnet-cu112==${MXNET} loguru opencv
+RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python
 
 # install ymir-exc sdk
 RUN if [ "${SERVER_MODE}" = "dev" ]; then \
diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile
index 4b7d735..32f4883 100644
--- a/live-code-executor/torch.dockerfile
+++ b/live-code-executor/torch.dockerfile
@@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y git curl wget zip gcc \
 
 # Install python package
 RUN pip install -U pip && \
- pip install loguru opencv
+ pip install loguru opencv-python
 
 # install ymir-exc sdk
 RUN if [ "${SERVER_MODE}" = "dev" ]; then \
From f1dd6c61ac33b3d3c33af6567a72573001bd23ee Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 7 Jul 2022 18:18:46 +0800
Subject: [PATCH 055/204] update opencv version

---
 live-code-executor/mxnet.dockerfile | 2 +-
 live-code-executor/torch.dockerfile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile
index 58601dc..a738f9b 100644
--- a/live-code-executor/mxnet.dockerfile
+++ b/live-code-executor/mxnet.dockerfile
@@ -29,7 +29,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC &&
 
 # Install python package
 # view https://mxnet.apache.org/versions/1.9.1/get_started for detail
-RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python
+RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==4.1.2.30
 
 # install ymir-exc sdk
 RUN if [ "${SERVER_MODE}" = "dev" ]; then \
diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile
index 32f4883..aa17ce2 100644
--- a/live-code-executor/torch.dockerfile
+++ b/live-code-executor/torch.dockerfile
@@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y git curl wget zip gcc \
 
 # Install python package
 RUN pip install -U pip && \
- pip install loguru opencv-python
+ pip install loguru opencv-python==4.1.2.30
 
 # install ymir-exc sdk
 RUN if [ "${SERVER_MODE}" = "dev" ]; then \
From 50e6864f67a8ff8f9ba399649611a374e1ca788e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E4=BD=B3=E6=AC=A3?=
Date: Thu, 7 Jul 2022 18:32:16 +0800
Subject: [PATCH 056/204] Update torch.dockerfile

---
 live-code-executor/torch.dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile
index aa17ce2..61526d5 100644
--- a/live-code-executor/torch.dockerfile
+++ b/live-code-executor/torch.dockerfile
@@ -6,7 +6,7 @@ ARG CUDNN="8"
 FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime
 
 ARG SERVER_MODE=prod
-
+ARG OPENCV="4.1.2.30"
 ENV
TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y git curl wget zip gcc \ # Install python package RUN pip install -U pip && \ - pip install loguru opencv-python==4.1.2.30 + pip install loguru opencv-python==${OPENCV} # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ From 6a164cae6f7841debae5a905f8b1cac79a9401aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E4=BD=B3=E6=AC=A3?= Date: Thu, 7 Jul 2022 18:32:49 +0800 Subject: [PATCH 057/204] Update mxnet.dockerfile --- live-code-executor/mxnet.dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index a738f9b..fd258af 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -5,6 +5,7 @@ ARG SYSTEM="ubuntu18.04" FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-${BUILD}-${SYSTEM} ARG MXNET="1.9.1" +ARG OPENCV="4.1.2.30" ARG DEBIAN_FRONTEND="noninteractive" ARG MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh" @@ -29,8 +30,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && # Install python package # view https://mxnet.apache.org/versions/1.9.1/get_started for detail -RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==4.1.2.30 - +RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==${OPENCV} # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ From 3ef0d4356300aea96d9c3502fa4eaf7d9b9f5561 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 7 Jul 2022 18:51:23 +0800 Subject: [PATCH 058/204] add opencv and numpy arg --- live-code-executor/mxnet.dockerfile | 3 ++- live-code-executor/torch.dockerfile | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index fd258af..e1ff9c4 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -6,6 +6,7 @@ ARG SYSTEM="ubuntu18.04" FROM nvidia/cuda:${CUDA}-cudnn${CUDNN}-${BUILD}-${SYSTEM} ARG MXNET="1.9.1" ARG OPENCV="4.1.2.30" +ARG NUMPY="1.20.0" ARG DEBIAN_FRONTEND="noninteractive" ARG MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh" @@ -30,7 +31,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && # Install python package # view https://mxnet.apache.org/versions/1.9.1/get_started for detail -RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==${OPENCV} +RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==${OPENCV} numpy=${NUMPY} # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index 61526d5..806f471 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -7,6 +7,7 @@ FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime ARG SERVER_MODE=prod ARG OPENCV="4.1.2.30" +ARG NUMPY="1.20.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV 
TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" @@ -20,7 +21,7 @@ RUN apt-get update && apt-get install -y git curl wget zip gcc \ # Install python package RUN pip install -U pip && \ - pip install loguru opencv-python==${OPENCV} + pip install loguru opencv-python==${OPENCV} numpy=${NUMPY} # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ From cffc5d0033b6d7acd1f26b8bcc1f40e85895848b Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 6 Jul 2022 15:15:14 +0800 Subject: [PATCH 059/204] add cuda112 dockerfile for yolov4 --- det-yolov4-mining/cuda112.dockerfile | 15 +++++++++++++++ det-yolov4-training/Dockerfile | 2 +- det-yolov4-training/cuda112.dockerfile | 23 +++++++++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 det-yolov4-mining/cuda112.dockerfile create mode 100644 det-yolov4-training/cuda112.dockerfile diff --git a/det-yolov4-mining/cuda112.dockerfile b/det-yolov4-mining/cuda112.dockerfile new file mode 100644 index 0000000..871b00f --- /dev/null +++ b/det-yolov4-mining/cuda112.dockerfile @@ -0,0 +1,15 @@ +FROM industryessentials/ymir-executor:cuda112-yolov4-training + +RUN apt-get update && apt-get install -y --no-install-recommends libsm6 libxext6 libfontconfig1 libxrender1 libgl1-mesa-glx \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +RUN pip3 install --upgrade pip setuptools wheel && pip3 install opencv-python pyyaml scipy tqdm && rm -rf /root/.cache/pip3 + +COPY . /app +WORKDIR /app +RUN cp ./start.sh /usr/bin/start.sh && \ + mkdir -p /img-man && \ + cp ./mining-template.yaml /img-man/mining-template.yaml && \ + cp ./infer-template.yaml /img-man/infer-template.yaml && \ + cp ./README.md /img-man/readme.md +CMD sh /usr/bin/start.sh diff --git a/det-yolov4-training/Dockerfile b/det-yolov4-training/Dockerfile index 6e6c4c9..61ce1f6 100644 --- a/det-yolov4-training/Dockerfile +++ b/det-yolov4-training/Dockerfile @@ -1,6 +1,7 @@ FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple WORKDIR /darknet +RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update RUN apt install -y software-properties-common wget RUN add-apt-repository ppa:deadsnakes/ppa @@ -12,7 +13,6 @@ RUN rm /usr/bin/python3 RUN ln -s /usr/bin/python3.7 /usr/bin/python3 RUN python3 get-pip.py RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six -RUN echo '\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic main restricted universe multiverse\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse\ndeb https://mirrors.ustc.edu.cn/ubuntu/ bionic-security main restricted universe multiverse\n' >> /etc/apt/sources.list ENV DEBIAN_FRONTEND noninteractive RUN apt-get update && apt-get install -y libopencv-dev COPY . 
/darknet
diff --git a/det-yolov4-training/cuda112.dockerfile b/det-yolov4-training/cuda112.dockerfile
new file mode 100644
index 0000000..3e6884b
--- /dev/null
+++ b/det-yolov4-training/cuda112.dockerfile
@@ -0,0 +1,23 @@
+FROM nvidia/cuda:11.2.1-cudnn8-devel-ubuntu18.04
+ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple
+WORKDIR /darknet
+RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update
+RUN apt install -y software-properties-common wget
+RUN add-apt-repository ppa:deadsnakes/ppa
+RUN apt-get update
+RUN apt install -y python3.7 python3-distutils
+RUN wget https://bootstrap.pypa.io/get-pip.py
+RUN wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137
+RUN rm /usr/bin/python3
+RUN ln -s /usr/bin/python3.7 /usr/bin/python3
+RUN python3 get-pip.py
+RUN pip3 install -i ${PIP_SOURCE} mxnet-cu112==1.9.1 numpy opencv-python pyyaml watchdog tensorboardX six
+
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update && apt-get install -y libopencv-dev
+COPY . /darknet
+RUN cp /darknet/make_train_test_darknet.sh /usr/bin/start.sh
+RUN mkdir /img-man && cp /darknet/training-template.yaml /img-man/training-template.yaml
+RUN make -j
+CMD bash /usr/bin/start.sh
From 80084c65e720d0b4d060e04b7c0e3ffaa3a5a781 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 8 Jul 2022 13:32:18 +0800
Subject: [PATCH 060/204] update master

---
 README.MD | 11 ++++++++++-
 det-yolov5-tmi/.dockerignore | 2 +-
 live-code-executor/mxnet.dockerfile | 3 ++-
 live-code-executor/torch.dockerfile | 2 +-
 4 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/README.MD b/README.MD
index bcba683..dafee8a 100644
--- a/README.MD
+++ b/README.MD
@@ -2,7 +2,16 @@
 
 ## det-yolov4-training
 
-- Training image for yolov4, based on the mxnet and darknet frameworks. The default CUDA version is `10.1`, which cannot run directly on newer GPUs such as the GTX3080/GTX3090; you need to modify the dockerfile to raise the CUDA version above 11.1 and update the other dependencies as well.
+- Training image for yolov4, based on the mxnet and darknet frameworks. The default `Dockerfile` uses CUDA `10.1`, which cannot run directly on newer GPUs such as the GTX3080/GTX3090; to raise the CUDA version above 11.1, build from `cuda112.dockerfile` as shown below.
+
+ ```
+ cd det-yolov4-training
+ # cuda101-yolov4-training
+ docker build -t ymir-executor/yolov4:cuda101-training -f Dockerfile .
+
+ # cuda112-yolov4-training
+ docker build -t ymir-executor/yolov4:cuda112-training -f cuda112.dockerfile .
+ ```
 
 ## det-yolov4-mining
 
diff --git a/det-yolov5-tmi/.dockerignore b/det-yolov5-tmi/.dockerignore
index af51ccc..bee6b98 100644
--- a/det-yolov5-tmi/.dockerignore
+++ b/det-yolov5-tmi/.dockerignore
@@ -12,7 +12,7 @@ data/samples/*
 *.jpg
 
 # Neural Network weights -----------------------------------------------------------------------------------------------
-**/*.pt
+#**/*.pt
 **/*.pth
 **/*.onnx
 **/*.engine
diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile
index e1ff9c4..cd2ed5d 100644
--- a/live-code-executor/mxnet.dockerfile
+++ b/live-code-executor/mxnet.dockerfile
@@ -15,7 +15,7 @@ ENV PATH /opt/conda/bin:$PATH
 
 # install linux package, needs to fix GPG error first.
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \ apt-get update && \ - apt-get install -y git gcc wget curl zip libglib2.0-0 libgl1-mesa-glx && \ + apt-get install -y git gcc wget curl zip libglib2.0-0 libgl1-mesa-glx \ + libsm6 libxext6 libxrender-dev && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ wget "${MINICONDA_URL}" -O miniconda.sh -q && \ diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index 806f471..88fe0eb 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -15,7 +15,7 @@ ENV LANG=C.UTF-8 # install linux package RUN apt-get update && apt-get install -y git curl wget zip gcc \ - libglib2.0-0 libgl1-mesa-glx \ + libglib2.0-0 libgl1-mesa-glx libsm6 libxext6 libxrender-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* From 6bdd3e529b1d0dafcf1349d3ab7bfa226f3883fc Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 11 Jul 2022 11:54:47 +0800 Subject: [PATCH 061/204] update dockerfile --- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 3 +-- det-mmdetection-tmi/docker/Dockerfile.cuda111 | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index 841fe7b..62ea15e 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -27,7 +27,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ # Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi \ @@ -38,7 +38,6 @@ RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ COPY . /app/ WORKDIR /app RUN pip install --no-cache-dir -r requirements/runtime.txt \ - && pip install --no-cache-dir -e . \ && mkdir /img-man \ && mv *-template.yaml /img-man \ && echo "cd /app && python3 start.py" > /usr/bin/start.sh diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index d0c24c6..08fe8f4 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -25,7 +25,7 @@ RUN apt-get update && apt-get install -y build-essential ffmpeg libsm6 libxext6 # Install ymir-exc sdk and MMCV RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi \ @@ -36,7 +36,6 @@ RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ COPY . /app/ WORKDIR /app RUN pip install --no-cache-dir -r requirements/runtime.txt \ - && pip install --no-cache-dir -e . 
\ && mkdir /img-man \ && mv *-template.yaml /img-man \ && echo "cd /app && python3 start.py" > /usr/bin/start.sh From 5c1a6f5f95a5e979931ef547d1791563c7cba21e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 11 Jul 2022 12:06:53 +0800 Subject: [PATCH 062/204] format doc --- .../mmdet/core/evaluation/eval_hooks.py | 1 - det-mmdetection-tmi/mmdet/datasets/coco.py | 6 +- det-mmdetection-tmi/mmdet/datasets/ymir.py | 62 ++++++++++--------- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 4 +- det-mmdetection-tmi/start.py | 9 ++- det-mmdetection-tmi/tools/train.py | 1 + det-mmdetection-tmi/ymir_mining.py | 2 +- 7 files changed, 46 insertions(+), 39 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index 6e63d43..dff0705 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -180,4 +180,3 @@ def _do_evaluate(self, runner): # 'best_score', self.init_value_map[self.rule]) # if self.compare_func(key_score, best_score): # update_training_result_file(key_score) - diff --git a/det-mmdetection-tmi/mmdet/datasets/coco.py b/det-mmdetection-tmi/mmdet/datasets/coco.py index ffe83d4..7de1cdb 100644 --- a/det-mmdetection-tmi/mmdet/datasets/coco.py +++ b/det-mmdetection-tmi/mmdet/datasets/coco.py @@ -597,7 +597,9 @@ def evaluate(self, if COCO_EVAL_TMP_FILE is not None: mmcv.dump(eval_results, COCO_EVAL_TMP_FILE, file_format='json') else: - raise Exception('please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') + raise Exception( + 'please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') - print_log(f'\n write eval result to {COCO_EVAL_TMP_FILE}', logger=logger) + print_log( + f'\n write eval result to {COCO_EVAL_TMP_FILE}', logger=logger) return eval_results diff --git a/det-mmdetection-tmi/mmdet/datasets/ymir.py b/det-mmdetection-tmi/mmdet/datasets/ymir.py index 42771fb..1276310 100644 --- a/det-mmdetection-tmi/mmdet/datasets/ymir.py +++ b/det-mmdetection-tmi/mmdet/datasets/ymir.py @@ -1,7 +1,6 @@ # Copyright (c) OpenMMLab voc.py. All rights reserved. 
# wangjiaxin 2022-04-25 -from collections import OrderedDict import os.path as osp # from PIL import Image @@ -12,21 +11,23 @@ from .api_wrappers import COCO from .coco import CocoDataset + @DATASETS.register_module() class YmirDataset(CocoDataset): """ - converted dataset by ymir system 1.0.0 + converted dataset by ymir system 1.0.0 + /in/assets: image files directory /in/annotations: annotation files directory /in/train-index.tsv: image_file \t annotation_file /in/val-index.tsv: image_file \t annotation_file """ + def __init__(self, min_size=0, ann_prefix='annotations', **kwargs): - self.min_size=min_size - self.ann_prefix=ann_prefix + self.min_size = min_size + self.ann_prefix = ann_prefix super(YmirDataset, self).__init__(**kwargs) def load_annotations(self, ann_file): @@ -43,16 +44,16 @@ def load_annotations(self, ann_file): categories = [] # category_id is from 1 for coco, not 0 for i, name in enumerate(self.CLASSES): - categories.append({'supercategory':'none', + categories.append({'supercategory': 'none', 'id': i+1, - 'name': name}) + 'name': name}) annotations = [] instance_counter = 1 image_counter = 1 - with open(ann_file,'r') as fp: - lines=fp.readlines() + with open(ann_file, 'r') as fp: + lines = fp.readlines() for line in lines: # split any white space @@ -76,22 +77,22 @@ def load_annotations(self, ann_file): anns = [] for ann in anns: - ann['image_id']=image_counter - ann['id']=instance_counter + ann['image_id'] = image_counter + ann['id'] = instance_counter annotations.append(ann) - instance_counter+=1 + instance_counter += 1 - image_counter+=1 + image_counter += 1 - ### pycocotool coco init + # pycocotool coco init self.coco = COCO() - self.coco.dataset['type']='instances' - self.coco.dataset['categories']=categories - self.coco.dataset['images']=images - self.coco.dataset['annotations']=annotations + self.coco.dataset['type'] = 'instances' + self.coco.dataset['categories'] = categories + self.coco.dataset['images'] = images + self.coco.dataset['annotations'] = annotations self.coco.createIndex() - ### mmdetection coco init + # mmdetection coco init # avoid the filter problem in CocoDataset, view coco_api.py for detail self.coco.img_ann_map = self.coco.imgToAnns self.coco.cat_img_map = self.coco.catToImgs @@ -103,7 +104,7 @@ def load_annotations(self, ann_file): self.img_ids = self.coco.get_img_ids() # self.img_ids = list(self.coco.imgs.keys()) assert len(self.img_ids) > 0, 'image number must > 0' - N=len(self.img_ids) + N = len(self.img_ids) print(f'load {N} image from YMIR dataset') data_infos = [] @@ -119,11 +120,11 @@ def load_annotations(self, ann_file): return data_infos def dump(self, ann_file): - with open(ann_file,'w') as fp: + with open(ann_file, 'w') as fp: json.dump(self.coco.dataset, fp) - def get_ann_path_from_img_path(self,img_path): - img_id=osp.splitext(osp.basename(img_path))[0] + def get_ann_path_from_img_path(self, img_path): + img_id = osp.splitext(osp.basename(img_path))[0] return osp.join(self.data_root, self.ann_prefix, img_id+'.txt') def get_txt_ann_info(self, txt_path): @@ -141,16 +142,16 @@ def get_txt_ann_info(self, txt_path): # txt_path = self.get_ann_path_from_img_path(img_path) anns = [] if osp.exists(txt_path): - with open(txt_path,'r') as fp: - lines=fp.readlines() + with open(txt_path, 'r') as fp: + lines = fp.readlines() else: - lines=[] + lines = [] for line in lines: - obj=[int(x) for x in line.strip().split(',')[0:5]] + obj = [int(x) for x in line.strip().split(',')[0:5]] # YMIR category id starts from 0, coco from 1 category_id, 
xmin, ymin, xmax, ymax = obj bbox = [xmin, ymin, xmax, ymax] - h,w=ymax-ymin,xmax-xmin + h, w = ymax-ymin, xmax-xmin ignore = 0 if self.min_size: assert not self.test_mode @@ -160,12 +161,13 @@ def get_txt_ann_info(self, txt_path): ignore = 1 ann = dict( - segmentation=[[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]], + segmentation=[ + [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]], area=w*h, iscrowd=0, image_id=None, bbox=[xmin, ymin, w, h], - category_id=category_id+1, # category id is from 1 for coco + category_id=category_id+1, # category id is from 1 for coco id=None, ignore=ignore ) @@ -188,7 +190,7 @@ def get_cat_ids(self, idx): txt_path = self.data_infos[idx]['ann_path'] txt_path = osp.join(self.data_root, self.ann_prefix, txt_path) if osp.exists(txt_path): - with open(txt_path,'r') as fp: + with open(txt_path, 'r') as fp: lines = fp.readlines() else: lines = [] diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index d3c6e97..faf39e0 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -158,7 +158,7 @@ def get_weight_file(cfg: edict) -> str: return "" -def update_training_result_file(last=False, key_score=None): +def update_training_result_file(last: bool = False, key_score=None): if key_score: logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') @@ -168,7 +168,7 @@ def update_training_result_file(last=False, key_score=None): eval_result = mmcv.load(COCO_EVAL_TMP_FILE) # eval_result may be empty dict {}. - map = eval_result.get('bbox_mAP_50',0) + map = eval_result.get('bbox_mAP_50', 0) work_dir = os.getenv('YMIR_MODELS_DIR') if work_dir is None or not osp.isdir(work_dir): diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 686d451..e4b1398 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -32,7 +32,8 @@ def _run_training() -> None: # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) - logging.info(f"training finished") + logging.info("training finished") + def _run_mining() -> None: command = 'python3 ymir_mining.py' @@ -40,7 +41,8 @@ def _run_mining() -> None: subprocess.run(command.split(), check=True) monitor.write_monitor_logger(percent=1.0) - logging.info(f"mining finished") + logging.info("mining finished") + def _run_infer() -> None: command = 'python3 ymir_infer.py' @@ -48,7 +50,8 @@ def _run_infer() -> None: subprocess.run(command.split(), check=True) monitor.write_monitor_logger(percent=1.0) - logging.info(f"infer finished") + logging.info("infer finished") + if __name__ == '__main__': logging.basicConfig(stream=sys.stdout, diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index a65e130..74121ff 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -19,6 +19,7 @@ from mmdet.utils import collect_env, get_root_logger, setup_multi_processes from mmdet.utils.util_ymir import modify_mmdet_config, get_merged_config + def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining.py index 16379df..f4bea0c 100644 --- a/det-mmdetection-tmi/ymir_mining.py +++ b/det-mmdetection-tmi/ymir_mining.py @@ -307,7 +307,7 @@ def predict(self, img: CV_IMAGE) -> NDArray: """ results = self.infer(img) - 
xyxy_conf_idx_list=[] + xyxy_conf_idx_list = [] for idx, result in enumerate(results): for line in result: if any(np.isinf(line)): From 0a6f11c9e9c7b82319b63d4652dff3a6b99e4fd3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 11 Jul 2022 12:17:11 +0800 Subject: [PATCH 063/204] update .gitignore --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 5563689..6dbd818 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,7 @@ +# dockerfile for China +*.dockerfile.cn +det-mmdetection-tmi/docker/*.cn + *.png *.jpg *.img From 49906e3068b2319982fedd591a7f6042c3a4e6b5 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 11 Jul 2022 15:50:47 +0800 Subject: [PATCH 064/204] DDP training --- det-yolov5-tmi/start.py | 36 +++++++++++++++++++++++---- det-yolov5-tmi/training-template.yaml | 2 ++ 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index fba6632..d59fa8a 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -51,17 +51,43 @@ def _run_training(cfg: edict) -> None: img_size = cfg.param.img_size save_period = cfg.param.save_period args_options = cfg.param.args_options + gpu_id = str(cfg.param.gpu_id) + gpu_count = len(gpu_id.split(',')) if gpu_id else 0 + port = int(cfg.param.port) + sync_bn = cfg.param.sync_bn weights = get_weight_file(cfg) if not weights: # download pretrained weight weights = download_weight_file(model) models_dir = cfg.ymir.output.models_dir - command = f'python3 train.py --epochs {epochs} ' + \ - f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ - f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ - f'--img-size {img_size} ' + \ - f'--save-period {save_period}' + + if gpu_count == 0: + command = f'python3 train.py --epochs {epochs} ' + \ + f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ + f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ + f'--img-size {img_size} ' + \ + f'--save-period {save_period} ' + \ + f'--devices cpu' + elif gpu_count == 1: + command = f'python3 train.py --epochs {epochs} ' + \ + f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ + f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ + f'--img-size {img_size} ' + \ + f'--save-period {save_period} ' + \ + f'--devices {gpu_id}' + else: + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} ' + \ + f'--master_port {port} train.py --epochs {epochs} ' + \ + f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ + f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ + f'--img-size {img_size} ' + \ + f'--save-period {save_period} ' + \ + f'--devices {gpu_id}' + + if sync_bn: + command += " --sync-bn" + if args_options: command += f" {args_options}" diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index c6d0ee4..b01bdc1 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -14,3 +14,5 @@ img_size: 640 opset: 11 args_options: '--exist-ok' save_period: 10 +port: 29500 # work for multi-gpu only +sync_bn: False # work for multi-gpu only From a1177dcab0916d1ef745b80f27eff3b0dfe3734a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 11 Jul 2022 16:15:22 +0800 Subject: [PATCH 065/204] remove *.pt in dockerignore file --- det-yolov5-tmi/.dockerignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/det-yolov5-tmi/.dockerignore b/det-yolov5-tmi/.dockerignore index af51ccc..bee6b98 100644 --- a/det-yolov5-tmi/.dockerignore +++ b/det-yolov5-tmi/.dockerignore @@ -12,7 +12,7 @@ data/samples/* *.jpg # Neural Network weights ----------------------------------------------------------------------------------------------- -**/*.pt +#**/*.pt **/*.pth **/*.onnx **/*.engine From 3d342d05cab9479ce3fcdee7253614ea497038f4 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 11 Jul 2022 17:13:34 +0800 Subject: [PATCH 066/204] add conf_threshold for infer --- det-mmdetection-tmi/infer-template.yaml | 1 + det-mmdetection-tmi/mmdet/utils/util_ymir.py | 3 ++- det-mmdetection-tmi/ymir_infer.py | 6 +++++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/det-mmdetection-tmi/infer-template.yaml b/det-mmdetection-tmi/infer-template.yaml index 7dd411c..cc2f1e7 100644 --- a/det-mmdetection-tmi/infer-template.yaml +++ b/det-mmdetection-tmi/infer-template.yaml @@ -4,4 +4,5 @@ # config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' # args_options: '' cfg_options: '' +conf_threshold: 0.2 # port: 12345 diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index faf39e0..eff0aba 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -203,6 +203,7 @@ def update_training_result_file(last: bool = False, key_score=None): model_stages = {} if stage_name not in model_stages: - rw.write_model_stage(files=[newest_weight_file], + config_files = [f for f in result_files if f.endswith('.py')] + rw.write_model_stage(files=[newest_weight_file] + config_files, mAP=float(map), stage_name=stage_name) diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 84df374..b4716e2 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -108,11 +108,15 @@ def main(): # write infer result monitor_gap = max(1, N // 100) + conf_threshold = float(cfg.param.conf_threshold) for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): img = cv2.imread(asset_path) result = model.infer(img) - infer_result[asset_path] = mmdet_result_to_ymir( + raw_anns = mmdet_result_to_ymir( result, cfg.param.class_names) + + infer_result[asset_path] = [ + ann for ann in raw_anns if ann.score >= conf_threshold] idx += 1 if idx % monitor_gap == 0: From ba3b7f3a17b1e9f51e3c6db90d53b524152885d9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 13 Jul 2022 14:00:14 +0800 Subject: [PATCH 067/204] update dockerfile and commit id --- live-code-executor/mxnet.dockerfile | 4 ++-- live-code-executor/torch.dockerfile | 4 ++-- live-code-executor/ymir_start.py | 19 +++++++++++++------ 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index e1ff9c4..e04bd4b 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -31,10 +31,10 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && # Install python package # view https://mxnet.apache.org/versions/1.9.1/get_started for detail -RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==${OPENCV} numpy=${NUMPY} +RUN pip3 install mxnet-cu112==${MXNET} loguru opencv-python==${OPENCV} numpy==${NUMPY} # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U 
"git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index 806f471..a71476f 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -21,11 +21,11 @@ RUN apt-get update && apt-get install -y git curl wget zip gcc \ # Install python package RUN pip install -U pip && \ - pip install loguru opencv-python==${OPENCV} numpy=${NUMPY} + pip install loguru opencv-python==${OPENCV} numpy==${NUMPY} # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py index 0ea1bd6..918320c 100644 --- a/live-code-executor/ymir_start.py +++ b/live-code-executor/ymir_start.py @@ -20,15 +20,22 @@ def main(): show_ymir_info(executor_config) git_url = executor_config['git_url'] - git_branch = executor_config.get('git_branch', '') + # commit id, tag or branch + git_id = executor_config.get('git_branch', '') - if not git_branch: - cmd = f'git clone {git_url} --depth 1 /app' - else: - cmd = f'git clone {git_url} --depth 1 -b {git_branch} /app' - logger.info(f'clone code: {cmd}') + cmd = f'git clone {git_url} /app' subprocess.run(cmd.split(), check=True) + if not git_id: + result = subprocess.run('git rev-parse HEAD', check=True, shell=True, + capture_output=True, encoding='utf-8', cwd='/app') + # remove '\n' + git_id = result.stdout.strip() + else: + subprocess.run(f'git checkout {git_id}', check=True, shell=True, cwd='/app') + + logger.info(f'clone code with {git_id}: {cmd}') + # step 2. read /app/extra-requirements.txt and install it. 
pypi_file = '/app/extra-requirements.txt' if osp.exists(pypi_file): From c3e55a41ad47869e981838378349a0bdac50550d Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 15 Jul 2022 16:32:32 +0800 Subject: [PATCH 068/204] fix bug --- det-yolov5-tmi/start.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index d59fa8a..7f66691 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -54,7 +54,7 @@ def _run_training(cfg: edict) -> None: gpu_id = str(cfg.param.gpu_id) gpu_count = len(gpu_id.split(',')) if gpu_id else 0 port = int(cfg.param.port) - sync_bn = cfg.param.sync_bn + sync_bn = cfg.param.get('sync_bn', False) weights = get_weight_file(cfg) if not weights: # download pretrained weight @@ -68,14 +68,14 @@ def _run_training(cfg: edict) -> None: f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ f'--img-size {img_size} ' + \ f'--save-period {save_period} ' + \ - f'--devices cpu' + f'--device cpu' elif gpu_count == 1: command = f'python3 train.py --epochs {epochs} ' + \ f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ f'--img-size {img_size} ' + \ f'--save-period {save_period} ' + \ - f'--devices {gpu_id}' + f'--device {gpu_id}' else: command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} ' + \ f'--master_port {port} train.py --epochs {epochs} ' + \ @@ -83,7 +83,7 @@ def _run_training(cfg: edict) -> None: f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ f'--img-size {img_size} ' + \ f'--save-period {save_period} ' + \ - f'--devices {gpu_id}' + f'--device {gpu_id}' if sync_bn: command += " --sync-bn" From 9f8c1ac26c26b5eb82184d9f7f8ed99189eeb5e2 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 16 Jul 2022 10:12:23 +0800 Subject: [PATCH 069/204] swap config and add export_format --- det-yolov5-tmi/start.py | 2 +- det-yolov5-tmi/training-template.yaml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 7f66691..9e82742 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -53,7 +53,7 @@ def _run_training(cfg: edict) -> None: args_options = cfg.param.args_options gpu_id = str(cfg.param.gpu_id) gpu_count = len(gpu_id.split(',')) if gpu_id else 0 - port = int(cfg.param.port) + port = int(cfg.param.get('port', 29500)) sync_bn = cfg.param.get('sync_bn', False) weights = get_weight_file(cfg) if not weights: diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index b01bdc1..763f66a 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -7,6 +7,8 @@ # pretrained_model_params: [] # class_names: [] +shm_size: '32G' +export_format: 'ark:raw' model: 'yolov5s' batch_size: 16 epochs: 300 @@ -14,5 +16,5 @@ img_size: 640 opset: 11 args_options: '--exist-ok' save_period: 10 -port: 29500 # work for multi-gpu only sync_bn: False # work for multi-gpu only +port: 29500 # work for multi-gpu only From a3ffc83bac8b788fa6c688bc3dbf5e8c77fa9149 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 16 Jul 2022 10:52:11 +0800 Subject: [PATCH 070/204] update mmdet --- det-mmdetection-tmi/README_ymir.md | 8 +++++--- .../mmdet/core/evaluation/eval_hooks.py | 10 +++++----- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 2 +- det-mmdetection-tmi/ymir_train.py | 19 ++++++++++++++++--- 4 files changed, 27 
insertions(+), 12 deletions(-) diff --git a/det-mmdetection-tmi/README_ymir.md b/det-mmdetection-tmi/README_ymir.md index 194bd03..1281e7f 100644 --- a/det-mmdetection-tmi/README_ymir.md +++ b/det-mmdetection-tmi/README_ymir.md @@ -13,11 +13,13 @@ docker build -t ymir-executor/mmdet:cuda111-tmi -build-arg SERVER_MODE=dev -f do # changelog - modify `mmdet/datasets/coco.py`, save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` with json format - modify `mmdet/core/evaluation/eval_hooks.py`, write training result file and monitor task process -- modify `mmdet/datasets/__init__.py` and add `mmdet/datasets/ymir.py`, add class `YmirDataset` to load YMIR dataset. +- modify `mmdet/datasets/__init__.py, mmdet/datasets/coco.py` and add `mmdet/datasets/ymir.py`, add class `YmirDataset` to load YMIR dataset. +- modify `requirements/runtime.txt` to add new dependent package. - add `mmdet/utils/util_ymir.py` for ymir training/infer/mining - add `ymir_infer.py` for infer - add `ymir_mining.py` for mining - add `ymir_train.py` modify `tools/train.py` to update the mmcv config for training - add `start.py`, the entrypoint for docker image - - +- add `training-template.yaml, infer-template.yaml, mining-template.yaml` for ymir pre-defined hyper-parameters. +- add `docker/Dockerfile.cuda102, docker/Dockerfile.cuda111` to build docker image +- remove `docker/Dockerfile` to avoid misuse diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index dff0705..dc40801 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -10,7 +10,7 @@ from ymir_exc import monitor from mmdet.utils.util_ymir import (YmirStage, get_ymir_process, - update_training_result_file) + write_ymir_training_result) def _calc_dynamic_intervals(start_interval, dynamic_interval_list): @@ -79,7 +79,7 @@ def _do_evaluate(self, runner): results = single_gpu_test(runner.model, self.dataloader, show=False) runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) - update_training_result_file(last=False, key_score=key_score) + write_ymir_training_result(last=False, key_score=key_score) # the key_score may be `None` so it needs to skip the action to save # the best checkpoint if self.save_best and key_score: @@ -87,7 +87,7 @@ def _do_evaluate(self, runner): # best_score = runner.meta['hook_msgs'].get( # 'best_score', self.init_value_map[self.rule]) # if self.compare_func(key_score, best_score): - # update_training_result_file(key_score) + # write_ymir_training_result(key_score) # Note: Considering that MMCV's EvalHook updated its interface in V1.3.16, @@ -170,7 +170,7 @@ def _do_evaluate(self, runner): print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) - update_training_result_file(last=False, key_score=key_score) + write_ymir_training_result(last=False, key_score=key_score) # the key_score may be `None` so it needs to skip # the action to save the best checkpoint if self.save_best and key_score: @@ -179,4 +179,4 @@ def _do_evaluate(self, runner): # best_score = runner.meta['hook_msgs'].get( # 'best_score', self.init_value_map[self.rule]) # if self.compare_func(key_score, best_score): - # update_training_result_file(key_score) + # write_ymir_training_result(key_score) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py 
index eff0aba..2c232e2 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -158,7 +158,7 @@ def get_weight_file(cfg: edict) -> str: return "" -def update_training_result_file(last: bool = False, key_score=None): +def write_ymir_training_result(last: bool = False, key_score=None): if key_score: logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index ace0c27..7a57946 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -7,8 +7,8 @@ from easydict import EasyDict as edict from ymir_exc import monitor -from mmdet.utils.util_ymir import (YmirStage, get_merged_config, - get_ymir_process, update_training_result_file) +from mmdet.utils.util_ymir import (YmirStage, get_merged_config, get_weight_file, + get_ymir_process, write_ymir_training_result) def main(cfg: edict) -> int: @@ -28,6 +28,19 @@ def main(cfg: edict) -> int: args_options = cfg.param.get("args_options", None) cfg_options = cfg.param.get("cfg_options", None) + if args_options.find('--resume-from') == -1 and \ + cfg_options.find('load_from') == -1 and \ + cfg_options.find('resume_from') == -1: + + weight_file = get_weight_file(cfg) + if weight_file: + if cfg_options: + cfg_options += f' load_from={weight_file}' + else: + cfg_options = f'load_from={weight_file}' + else: + logging.warning('no weight file used for training!') + monitor.write_monitor_logger( percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) @@ -57,7 +70,7 @@ def main(cfg: edict) -> int: subprocess.run(cmd.split(), check=True) # save the last checkpoint - update_training_result_file(last=True) + write_ymir_training_result(last=True) return 0 From 4bfe8c3a5e9f9cc4d8eaa43b01bfe3687bbc5041 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 16 Jul 2022 11:12:31 +0800 Subject: [PATCH 071/204] fix none error --- det-mmdetection-tmi/ymir_train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 7a57946..96d1a69 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -28,9 +28,9 @@ def main(cfg: edict) -> int: args_options = cfg.param.get("args_options", None) cfg_options = cfg.param.get("cfg_options", None) - if args_options.find('--resume-from') == -1 and \ - cfg_options.find('load_from') == -1 and \ - cfg_options.find('resume_from') == -1: + if (args_options is None or args_options.find('--resume-from') == -1) and \ + (cfg_options is None or (cfg_options.find('load_from') == -1 and + cfg_options.find('resume_from') == -1)): weight_file = get_weight_file(cfg) if weight_file: From 6326d07bb456d163e750bd935f0c6964882c62b3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 16 Jul 2022 11:14:16 +0800 Subject: [PATCH 072/204] add comment --- det-mmdetection-tmi/ymir_train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 96d1a69..31c2375 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -28,6 +28,7 @@ def main(cfg: edict) -> int: args_options = cfg.param.get("args_options", None) cfg_options = cfg.param.get("cfg_options", None) + # auto load offered weight file if not set by user! 
if (args_options is None or args_options.find('--resume-from') == -1) and \
 (cfg_options is None or (cfg_options.find('load_from') == -1 and
 cfg_options.find('resume_from') == -1)):
From e87e7ce151599fdb6e857fbcab6678c6261210f2 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Mon, 18 Jul 2022 12:33:14 +0800
Subject: [PATCH 073/204] update git clone for live code

---
 live-code-executor/ymir_start.py | 22 +++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py
index 918320c..bd4f2dc 100644
--- a/live-code-executor/ymir_start.py
+++ b/live-code-executor/ymir_start.py
@@ -23,18 +23,26 @@ def main():
 # commit id, tag or branch
 git_id = executor_config.get('git_branch', '')
 
- cmd = f'git clone {git_url} /app'
+ # https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/
+ cmd = f'git clone --filter=blob:none {git_url} /app'
+ logger.info(f'running: {cmd}')
 subprocess.run(cmd.split(), check=True)
 
 if not git_id:
- result = subprocess.run('git rev-parse HEAD', check=True, shell=True,
- capture_output=True, encoding='utf-8', cwd='/app')
- # remove '\n'
- git_id = result.stdout.strip()
+ # logger.warning(f'no commit_id/tag/branch offered for {git_url}')
+ raise Exception(f'no commit_id/tag/branch offered for {git_url}')
 else:
- subprocess.run(f'git checkout {git_id}', check=True, shell=True, cwd='/app')
+ cmd = f'git checkout {git_id}'
+ logger.info(f'running: {cmd}')
+ subprocess.run(cmd.split(), check=True, cwd='/app')
+
+ result = subprocess.run('git rev-parse HEAD', check=True, shell=True,
+ capture_output=True, encoding='utf-8', cwd='/app')
+
+ commit_id = result.stdout.strip() # remove '\n'
+ subprocess.run(f'echo {commit_id} > /out/models/git_commit_id.txt', check=True, shell=True)
+ logger.info(f'clone code {git_url} with commit id {commit_id}')
 
- logger.info(f'clone code with {git_id}: {cmd}')
+
 # step 2. read /app/extra-requirements.txt and install it.
From a701af6f4e559bb9067ed6a0ac9c0f1b5bb145ab Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Tue, 19 Jul 2022 09:28:03 +0800
Subject: [PATCH 074/204] support ymir1.0.0+

---
 det-yolov5-tmi/cuda102.dockerfile | 2 +
 det-yolov5-tmi/cuda111.dockerfile | 2 +
 det-yolov5-tmi/train.py | 57 +++++++++++++++----------
 det-yolov5-tmi/utils/ymir_yolov5.py | 64 +++++++++++++++++++++++++++--
 4 files changed, 99 insertions(+), 26 deletions(-)

diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile
index 49a29d3..3afe7e4 100644
--- a/det-yolov5-tmi/cuda102.dockerfile
+++ b/det-yolov5-tmi/cuda102.dockerfile
@@ -4,11 +4,13 @@ ARG CUDNN="7"
 FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime
 
 ARG SERVER_MODE=prod
+ARG YMIR="1.1.0"
 ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
 ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
 ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
 ENV LANG=C.UTF-8
+ENV YMIR_VERSION=${YMIR}
 
 # Install linux package
 RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \
diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile
index 0c6e5dd..ca19784 100644
--- a/det-yolov5-tmi/cuda111.dockerfile
+++ b/det-yolov5-tmi/cuda111.dockerfile
@@ -5,11 +5,13 @@ ARG CUDNN="8"
 
 # cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!!
FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime ARG SERVER_MODE=prod +ARG YMIR="1.1.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" ENV LANG=C.UTF-8 +ENV YMIR_VERSION=$YMIR # Install linux package RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 7fcbbce..449c85d 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -12,6 +12,7 @@ $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch """ +from ymir_exc import monitor import argparse import math import os @@ -23,6 +24,7 @@ from pathlib import Path import numpy as np +from packaging.version import Version import torch import torch.distributed as dist import torch.nn as nn @@ -38,26 +40,25 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -import val # for end-of-epoch mAP -from models.experimental import attempt_load -from models.yolo import Model -from utils.autoanchor import check_anchors -from utils.autobatch import check_train_batch_size -from utils.callbacks import Callbacks -from utils.datasets import create_dataloader -from utils.downloads import attempt_download +from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config, write_old_ymir_training_result +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from utils.plots import plot_evolve, plot_labels +from utils.metrics import fitness +from utils.loss import ComputeLoss +from utils.loggers.wandb.wandb_utils import check_wandb_resume +from utils.loggers import Loggers from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_version, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer) -from utils.loggers import Loggers -from utils.loggers.wandb.wandb_utils import check_wandb_resume -from utils.loss import ComputeLoss -from utils.metrics import fitness -from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first -from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config -from ymir_exc import monitor +from utils.downloads import attempt_download +from utils.datasets import create_dataloader +from utils.callbacks import Callbacks +from utils.autobatch import check_train_batch_size +from utils.autoanchor import check_anchors +from models.yolo import Model +from models.experimental import attempt_load +import val # for end-of-epoch mAP LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -73,9 +74,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze ymir_cfg = opt.ymir_cfg - opt.ymir_cfg = '' # yaml cannot dump edict, remove it here + opt.ymir_cfg = '' # yaml cannot dump edict, remove it here log_dir = Path(ymir_cfg.ymir.output.tensorboard_dir) + 
YMIR_VERSION = os.environ.get('YMIR_VERSION', '1.2.0') + if Version(YMIR_VERSION) >= Version('1.2.0'): + latest_ymir = True + else: + latest_ymir = False + # Directories w = save_dir # weights dir (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir @@ -184,7 +191,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if opt.cos_lr: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA @@ -296,7 +303,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # ymir monitor if epoch % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=epoch/(epochs-start_epoch+1)) + percent = get_ymir_process(stage=YmirStage.TASK, p=epoch / (epochs - start_epoch + 1)) monitor.write_monitor_logger(percent=percent) # Update image weights (optional, single-GPU only) @@ -418,7 +425,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): torch.save(ckpt, w / f'epoch{epoch}.pt') weight_file = str(w / f'epoch{epoch}.pt') - write_ymir_training_result(ymir_cfg, map50=results[2], epoch=epoch, weight_file=weight_file) + if latest_ymir: + write_ymir_training_result(ymir_cfg, map50=results[2], epoch=epoch, weight_file=weight_file) + else: + write_old_ymir_training_result(ymir_cfg, results, maps, rewrite=True) del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) @@ -466,7 +476,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary torch.cuda.empty_cache() # save the best and last weight file with other files in models_dir - write_ymir_training_result(ymir_cfg, map50=best_fitness, epoch=epochs, weight_file='') + if RANK in [-1, 0]: + if latest_ymir: + write_ymir_training_result(ymir_cfg, map50=best_fitness, epoch=epochs, weight_file='') + else: + write_old_ymir_training_result(ymir_cfg, (), np.array([0]), rewrite=False) return results @@ -541,7 +555,6 @@ def main(opt, callbacks=Callbacks()): ymir_cfg = get_merged_config() opt.ymir_cfg = ymir_cfg - # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) if LOCAL_RANK != -1: diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 492822f..bc3fe7e 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -5,7 +5,7 @@ import os.path as osp import shutil from enum import IntEnum -from typing import Any, List, Tuple +from typing import Any, Dict, List, Tuple import numpy as np import torch @@ -70,7 +70,7 @@ def get_weight_file(cfg: edict) -> str: find weight file in cfg.param.model_params_path or cfg.param.model_params_path """ if cfg.ymir.run_training: - model_params_path = cfg.param.get('pretrained_model_params',[]) + model_params_path = cfg.param.get('pretrained_model_params', []) else: model_params_path = cfg.param.model_params_path @@ -209,9 +209,9 @@ def write_ymir_training_result(cfg: edict, epoch: int, weight_file: str) -> int: """ + for ymir>=1.2.0 cfg: ymir config - results: (mp, mr, map50, map, loss) - maps: map@0.5:0.95 for all classes + map50: map50 epoch: stage weight_file: saved weight files, empty weight_file will save all files """ @@ -230,3 +230,59 @@ def write_ymir_training_result(cfg: edict, files=files, mAP=float(map50)) return 0 + + +def 
write_training_result(model: List[str], map: float, class_aps: Dict[str, float], **kwargs: dict) -> None: + """ + for 1.0.0 <= ymir <=1.1.0 + """ + training_result = { + 'model': model, + 'map': map, + 'class_aps': class_aps, + } + training_result.update(kwargs) + + env_config = env.get_current_env() + with open(env_config.output.training_result_file, 'w') as f: + yaml.safe_dump(training_result, f) + + +def write_old_ymir_training_result(cfg: edict, results: Tuple, maps: NDArray, rewrite=False) -> int: + """ + for 1.0.0 <= ymir <=1.1.0 + cfg: ymir config + results: (mp, mr, map50, map, loss) + maps: map@0.5:0.95 for all classes + rewrite: set true to ensure write the best result + """ + + if not rewrite: + training_result_file = cfg.ymir.output.training_result_file + if osp.exists(training_result_file): + with open(cfg.ymir.output.training_result_file, 'r') as f: + training_result = yaml.safe_load(stream=f) + + files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))] + + training_result['model_names'] = files + ['best.onnx'] + write_training_result(**training_result) + + return 0 + + class_names = cfg.param.class_names + mp = results[0] # mean of precision + mr = results[1] # mean of recall + map50 = results[2] # mean of ap@0.5 + map = results[3] # mean of ap@0.5:0.95 + + files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))] + # use `rw.write_training_result` to save training result + write_training_result(model=files + ['best.onnx'], + map=float(map), + map50=float(map50), + precision=float(mp), + recall=float(mr), + class_aps={class_name: v + for class_name, v in zip(class_names, maps.tolist())}) + return 0 From d2beb2967aadd5e173dd87fff2d4eb1d11edb5cf Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 19 Jul 2022 09:44:20 +0800 Subject: [PATCH 075/204] update --- live-code-executor/ymir_start.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py index bd4f2dc..d2c5415 100644 --- a/live-code-executor/ymir_start.py +++ b/live-code-executor/ymir_start.py @@ -21,28 +21,20 @@ def main(): git_url = executor_config['git_url'] # commit id, tag or branch - git_id = executor_config.get('git_branch', '') + git_revision = executor_config.get('git_branch', '') # https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/ cmd = f'git clone --filter=blob:none {git_url} /app' logger.info(f'running: {cmd}') subprocess.run(cmd.split(), check=True) - if not git_id: + if not git_revision: # logger.warning(f'no commid_id/tag/branch offered for {git_url}') raise Exception(f'no commid_id/tag/branch offered for {git_url}') - else: - cmd = f'git checkout {git_id}' - logger.info(f'running: {cmd}') - subprocess.run(cmd.split(), check=True, cwd='/app') - - result = subprocess.run('git rev-parse HEAD', check=True, shell=True, - capture_output=True, encoding='utf-8', cwd='/app') - - commit_id = result.stdout.strip() # remove '\n' - subprocess.run(f'echo {commit_id} > /out/models/git_commit_id.txt', check=True, shell=True) - logger.info(f'clone code {git_url} with commit id {commit_id}') + cmd = f'git checkout {git_revision}' + logger.info(f'running: {cmd}') + subprocess.run(cmd.split(), check=True, cwd='/app') # step 2. read /app/extra-requirements.txt and install it. 
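    # (illustrative aside, not part of the hunk: with the rev-parse block gone,
    # /out/models/git_commit_id.txt is no longer written -- the revision is now
    # pinned up front via git_branch, a commit id or tag, which the README
    # update in the next patch recommends for reproducibility.)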
pypi_file = '/app/extra-requirements.txt'

From 298637dfb3031d72886746a621901b1bd6ddbb22 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Tue, 19 Jul 2022 13:56:09 +0800
Subject: [PATCH 076/204] update doc

---
 README.MD | 60 +++++++++++++++++++++++++++++++++++++++++-----------
 debug.png | Bin 0 -> 43913 bytes
 2 files changed, 48 insertions(+), 12 deletions(-)
 create mode 100644 debug.png

diff --git a/README.MD b/README.MD
index dafee8a..3618622 100644
--- a/README.MD
+++ b/README.MD
@@ -17,23 +17,51 @@

 - yolov4 mining and inference image, paired with det-yolov4-training

+```
+cd det-yolov4-mining
+
+docker build -t ymir-executor/yolov4:cuda101-mi -f Dockerfile .
+
+docker build -t ymir-executor/yolov4:cuda112-mi -f cuda112.dockerfile .
+```
+
 ## det-yolov5-tmi

-- yolov5 training, mining and inference image; weights are downloaded from github at training time
+- [change notes](./det-yolov5-tmi/README_yolov5.md)
+
+- yolov5 training, mining and inference image; weights are downloaded from github when the image is built. If your access to github is unstable, download the model weights in advance and copy them into the image at build time.

-- yolov5-FAQ
+```
+cd det-yolov5-tmi
+docker build -t ymir-executor/ymir1.1.0:cuda102-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda102.dockerfile .

-  - weight download fails or is slow during training: download the weights in advance and copy them into the image's `/app` directory, or import a pretrained model through ymir and load it at training time.
+docker build -t ymir-executor/ymir1.1.0:cuda111-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda111.dockerfile .
+```

 ## live-code-executor

-- clones code from the web into the image via `git_url` and `git_branch`, then runs it
+- clones code from the web into the image and runs it via `git_url` plus a `commit id` or `tag`; using a `branch` is not recommended, because the code it pulls may change over time and the experiment results will not be reproducible.

 - see [live-code](https://github.com/IndustryEssentials/ymir-remote-git)

+```
+cd live-code-executor
+
+docker build -t ymir-executor/live-code:torch-tmi -f torch.dockerfile
+
+docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile
+```
+
 ## det-mmdetection-tmi

-- mmdetection training, mining and inference image; not finished yet
+- [change notes](./det-mmdetection-tmi/README_ymir.md)
+
+```
+cd det-mmdetection-tmi
+docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi -f docker/Dockerfile.cuda102 --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 .
+
+docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi -f docker/Dockerfile.cuda111 --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 .
+```

 ## How to make your own ymir-executor

@@ -50,7 +78,7 @@

 # FAQ

-- apt or pip installs are slow or fail
+## apt or pip installs are slow or fail

   - use a mirror inside China, e.g. add the following command to the dockerfile

 ```
 RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple
 ```

-- docker build fails: the dockerfile cannot be found, or `COPY/ADD` fails
+## docker build fails: the dockerfile cannot be found, or `COPY/ADD` fails

   - go back to the project root (or the root directory the dockerfile expects) and make sure every file and folder referenced by `COPY/ADD` in the dockerfile is reachable; taking yolov5 as an example.

 ```
 docker build -t ymir-executor/yolov5 . -f det-yolov5-tmi/cuda111.dockerfile
 ```

-- files under `/in` and `/out` are cleaned up after the image finishes
+## files under `/in` and `/out` are cleaned up after the image finishes

 - to save space, the ymir system deletes unnecessary files after a task `finishes successfully`; to keep them, edit `ymir/command/mir/tools/command_run_in_out.py` in your ymir deployment and comment out `_cleanup(work_dir=work_dir)`. Note that the backend image must then be rebuilt

 ```
 docker-compose down -v && docker-compose up -d
 ```

-- how to debug a training image
+## how to debug a training image
+
+  ![](./debug.png)

 - first take the task id from the failed task's tensorboard link, e.g. `t000000100000175245d1656933456`

 - mount the directories and run the image `<docker-image>`; note that the ymir deployment directory must be mounted into the image

 ```
-  docker run -it --gpus all -v $PWD/in:/in -v $PWD/out:/out -v <ymir-workplace>:<ymir-workplace> <docker-image> bash
+  docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v <ymir-workplace>:<ymir-workplace> <docker-image> bash

   # e.g. with /home/ymir/ymir-workplace as the ymir deployment directory
-  docker run -it --gpus all -v $PWD/in:/in -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace <docker-image> bash
+  docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace <docker-image> bash
+  ```
+
+  - once inside the docker container, run the image's default command, e.g. the `CMD bash /usr/bin/start.sh` written in the dockerfile
+
+  ```
+  bash /usr/bin/start.sh
 ```

 - debugging inference and mining images works the same way; note that the corresponding directory is `ymir-workplace/sandbox/work_dir/TaskTypeMining`

-- how to trade off and improve model accuracy/speed
+## how to trade off and improve model accuracy/speed

 - model accuracy depends on dataset size, dataset quality, learning rate, batch size, number of iterations, model structure, data augmentation, loss function and so on; this is not expanded on here, for details see:

diff --git a/debug.png b/debug.png
new file mode 100644
index 0000000000000000000000000000000000000000..e439ca6465be75812be03450bcfb52acb23ae0a5
GIT binary patch
literal 43913
[... 43913 bytes of base85-encoded PNG data elided ...]
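The patch below spreads the ymir progress bar across mining and infer when the two tasks share a single container run. A minimal, self-contained sketch of the mapping it introduces (re-deriving `get_ymir_process` from the hunks that follow; the 0.1/0.8/0.1 stage weights and the equal per-task slices come from the patch, while the enum values and asserted numbers are worked out here):

```
from enum import IntEnum

class YmirStage(IntEnum):
    PREPROCESS = 1
    TASK = 2
    POSTPROCESS = 3

def get_ymir_process(stage: YmirStage, p: float, task_idx: int = 0, task_num: int = 1) -> float:
    # each of the task_num tasks owns an equal slice of [0, 1];
    # inside its slice, pre-process/task/post-process weigh 0.1 / 0.8 / 0.1
    init = task_idx * 1.0 / task_num
    ratio = 1.0 / task_num
    if stage == YmirStage.PREPROCESS:
        return init + 0.1 * p * ratio
    if stage == YmirStage.TASK:
        return init + (0.1 + 0.8 * p) * ratio
    return init + (0.9 + 0.1 * p) * ratio

# mining runs as task 0 of 2, infer as task 1 of 2:
assert abs(get_ymir_process(YmirStage.TASK, p=0.5, task_idx=0, task_num=2) - 0.25) < 1e-9
assert abs(get_ymir_process(YmirStage.TASK, p=0.5, task_idx=1, task_num=2) - 0.75) < 1e-9
```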
From 9ee903d03c139de48b91fe2efcdaa68f6cba1841 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Tue, 19 Jul 2022 14:44:00 +0800
Subject: [PATCH 077/204] fix monitor process for multiple task

---
 det-yolov5-tmi/mining/mining_cald.py | 19 +++++++++++++++-
 det-yolov5-tmi/start.py              | 34 ++++++++++++++++++++--------
 det-yolov5-tmi/train.py              | 30 ++++++++++++------------
 det-yolov5-tmi/utils/ymir_yolov5.py  | 28 +++++++++++++++++++----
 4 files changed, 81 insertions(+), 30 deletions(-)

diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py
index d93fb43..ba0f825 100644
--- a/det-yolov5-tmi/mining/mining_cald.py
+++ b/det-yolov5-tmi/mining/mining_cald.py
@@ -6,6 +6,7 @@ from typing import Dict, List, Tuple

 import cv2
+from easydict import EasyDict as edict
 import numpy as np
 from nptyping import NDArray
 from scipy.stats import entropy
@@ -32,6 +33,21 @@
def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: class MiningCald(YmirYolov5): + def __init__(self, cfg: edict): + super().__init__(cfg) + + if cfg.ymir.run_mining and cfg.ymir.run_infer: + mining_task_idx = 0 + # infer_task_idx = 1 + task_num = 2 + else: + mining_task_idx = 0 + # infer_task_idx = 0 + task_num = 1 + + self.task_idx = mining_task_idx + self.task_num = task_num + def mining(self) -> List: N = dr.items_count(env.DatasetType.CANDIDATE) monitor_gap = max(1, N // 100) @@ -86,7 +102,8 @@ def mining(self) -> List: idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, + task_idx=self.task_idx, task_num=self.task_num) monitor.write_monitor_logger(percent=percent) return mining_result diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 9e82742..61c4dbe 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -11,7 +11,8 @@ from ymir_exc import env, monitor from ymir_exc import result_writer as rw -from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, download_weight_file, get_merged_config, +from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, + download_weight_file, get_merged_config, get_weight_file, get_ymir_process) @@ -23,10 +24,19 @@ def start() -> int: if cfg.ymir.run_training: _run_training(cfg) else: + if cfg.ymir.run_mining and cfg.ymir.run_infer: + mining_task_idx = 0 + infer_task_idx = 1 + task_num = 2 + else: + mining_task_idx = 0 + infer_task_idx = 0 + task_num = 1 + if cfg.ymir.run_mining: - _run_mining(cfg) + _run_mining(cfg, mining_task_idx, task_num) if cfg.ymir.run_infer: - _run_infer(cfg) + _run_infer(cfg, infer_task_idx, task_num) return 0 @@ -109,25 +119,28 @@ def _run_training(cfg: edict) -> None: monitor.write_monitor_logger(percent=1.0) -def _run_mining(cfg: edict()) -> None: +def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: # generate data.yaml for mining out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + monitor.write_monitor_logger(percent=get_ymir_process( + stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) command = 'python3 mining/mining_cald.py' logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) - monitor.write_monitor_logger(percent=1.0) + monitor.write_monitor_logger(percent=get_ymir_process( + stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) -def _run_infer(cfg: edict) -> None: +def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: # generate data.yaml for infer out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + monitor.write_monitor_logger(percent=get_ymir_process( + stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) N = dr.items_count(env.DatasetType.CANDIDATE) infer_result = dict() @@ -142,11 +155,12 @@ def _run_infer(cfg: edict) -> None: idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, task_idx=task_idx, task_num=task_num) monitor.write_monitor_logger(percent=percent) 
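            # (illustrative aside, not part of the hunk: when mining and infer
            # share one run, task_num == 2 and infer gets task_idx == 1, so
            # these writes sweep the overall progress from 0.55 towards 0.95
            # as idx approaches N; the mining loop swept 0.05 -> 0.45 first.)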
rw.write_infer_result(infer_result=infer_result) - monitor.write_monitor_logger(percent=1.0) + monitor.write_monitor_logger(percent=get_ymir_process( + stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) if __name__ == '__main__': diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 449c85d..c42098b 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -40,25 +40,25 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config, write_old_ymir_training_result -from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first -from utils.plots import plot_evolve, plot_labels -from utils.metrics import fitness -from utils.loss import ComputeLoss -from utils.loggers.wandb.wandb_utils import check_wandb_resume -from utils.loggers import Loggers +import val # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.datasets import create_dataloader +from utils.downloads import attempt_download from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_version, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer) -from utils.downloads import attempt_download -from utils.datasets import create_dataloader -from utils.callbacks import Callbacks -from utils.autobatch import check_train_batch_size -from utils.autoanchor import check_anchors -from models.yolo import Model -from models.experimental import attempt_load -import val # for end-of-epoch mAP +from utils.loggers import Loggers +from utils.loggers.wandb.wandb_utils import check_wandb_resume +from utils.loss import ComputeLoss +from utils.metrics import fitness +from utils.plots import plot_evolve, plot_labels +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config, write_old_ymir_training_result LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index bc3fe7e..aa80a72 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -32,7 +32,13 @@ class YmirStage(IntEnum): CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def get_ymir_process(stage: YmirStage, p: float) -> float: +def get_ymir_process(stage: YmirStage, p: float, task_idx: int=0, task_num: int=1) -> float: + """ + stage: pre-process/task/post-process + p: percent for stage + task_idx: index for multiple tasks like mining (task_idx=0) and infer (task_idx=1) + task_num: the total number of multiple tasks. 
+ """ # const value for ymir process PREPROCESS_PERCENT = 0.1 TASK_PERCENT = 0.8 @@ -41,12 +47,14 @@ def get_ymir_process(stage: YmirStage, p: float) -> float: if p < 0 or p > 1.0: raise Exception(f'p not in [0,1], p={p}') + init = task_idx * 1.0 / task_num + ratio = 1.0 / task_num if stage == YmirStage.PREPROCESS: - return PREPROCESS_PERCENT * p + return init + PREPROCESS_PERCENT * p * ratio elif stage == YmirStage.TASK: - return PREPROCESS_PERCENT + TASK_PERCENT * p + return init + (PREPROCESS_PERCENT + TASK_PERCENT * p) * ratio elif stage == YmirStage.POSTPROCESS: - return PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p + return init + (PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p) * ratio else: raise NotImplementedError(f'unknown stage {stage}') @@ -101,6 +109,18 @@ class YmirYolov5(): def __init__(self, cfg: edict): self.cfg = cfg + if cfg.ymir.run_mining and cfg.ymir.run_infer: + # mining_task_idx = 0 + infer_task_idx = 1 + task_num = 2 + else: + # mining_task_idx = 0 + infer_task_idx = 0 + task_num = 1 + + self.task_idx=infer_task_idx + self.task_num=task_num + device = select_device(cfg.param.get('gpu_id', 'cpu')) self.model = self.init_detector(device) From 691bed7fcedc629e933223a8dae4672208d3e3d9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 19 Jul 2022 14:53:45 +0800 Subject: [PATCH 078/204] update dockerfile --- det-yolov5-tmi/cuda102.dockerfile | 4 ++-- det-yolov5-tmi/cuda111.dockerfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile index 3afe7e4..bd7fd97 100644 --- a/det-yolov5-tmi/cuda102.dockerfile +++ b/det-yolov5-tmi/cuda102.dockerfile @@ -20,13 +20,13 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi # Copy file from host to docker and install requirements -ADD ./det-yolov5-tmi /app +COPY . /app RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ && pip install -r /app/requirements.txt diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile index ca19784..f0ab4cc 100644 --- a/det-yolov5-tmi/cuda111.dockerfile +++ b/det-yolov5-tmi/cuda111.dockerfile @@ -21,13 +21,13 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ # install ymir-exc sdk RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install --force-reinstall -U "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ + pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ else \ pip install ymir-exc; \ fi # Copy file from host to docker and install requirements -ADD ./det-yolov5-tmi /app +COPY . 
/app RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ && pip install -r /app/requirements.txt From 95fac1ed1769299279325e71f37dc0f68613463f Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 19 Jul 2022 15:52:08 +0800 Subject: [PATCH 079/204] add system config into training-template --- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 2 + det-mmdetection-tmi/docker/Dockerfile.cuda111 | 2 + det-mmdetection-tmi/infer-template.yaml | 8 +- det-mmdetection-tmi/mining-template.yaml | 8 +- .../mmdet/core/evaluation/eval_hooks.py | 3 + det-mmdetection-tmi/mmdet/datasets/ymir.py | 19 +---- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 77 ++++++++++++++++--- det-mmdetection-tmi/start.py | 5 -- det-mmdetection-tmi/training-template.yaml | 2 + det-mmdetection-tmi/ymir_infer.py | 19 ++++- det-mmdetection-tmi/ymir_mining.py | 20 ++++- 11 files changed, 115 insertions(+), 50 deletions(-) diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index 62ea15e..dd73fb5 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -7,6 +7,7 @@ FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel # mmcv>=1.3.17, <=1.5.0 ARG MMCV="1.4.3" ARG SERVER_MODE=prod +ARG YMIR="1.1.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" @@ -14,6 +15,7 @@ ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" ENV LANG=C.UTF-8 ENV FORCE_CUDA="1" ENV PYTHONPATH=. +ENV YMIR_VERSION=${YMIR} # Set timezone RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo 'Asia/Shanghai' >/etc/timezone diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index 08fe8f4..e4320d4 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -7,12 +7,14 @@ FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime # mmcv>=1.3.17, <=1.5.0 ARG MMCV="1.4.3" ARG SERVER_MODE=prod +ARG YMIR="1.1.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" ENV FORCE_CUDA="1" ENV PYTHONPATH=. 
+ENV YMIR_VERSION=${YMIR} # Set timezone RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo 'Asia/Shanghai' >/etc/timezone diff --git a/det-mmdetection-tmi/infer-template.yaml b/det-mmdetection-tmi/infer-template.yaml index cc2f1e7..bf61d79 100644 --- a/det-mmdetection-tmi/infer-template.yaml +++ b/det-mmdetection-tmi/infer-template.yaml @@ -1,8 +1,4 @@ -# samples_per_gpu: 2 -# workers_per_gpu: 2 -# max_epochs: 300 -# config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' -# args_options: '' +shm_size: '32G' +export_format: 'ark:raw' cfg_options: '' conf_threshold: 0.2 -# port: 12345 diff --git a/det-mmdetection-tmi/mining-template.yaml b/det-mmdetection-tmi/mining-template.yaml index 7dd411c..5649a3c 100644 --- a/det-mmdetection-tmi/mining-template.yaml +++ b/det-mmdetection-tmi/mining-template.yaml @@ -1,7 +1,3 @@ -# samples_per_gpu: 2 -# workers_per_gpu: 2 -# max_epochs: 300 -# config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' -# args_options: '' +shm_size: '32G' +export_format: 'ark:raw' cfg_options: '' -# port: 12345 diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index dc40801..6b10dc1 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -84,6 +84,8 @@ def _do_evaluate(self, runner): # the best checkpoint if self.save_best and key_score: self._save_ckpt(runner, key_score) + + # TODO obtain best_score from runner # best_score = runner.meta['hook_msgs'].get( # 'best_score', self.init_value_map[self.rule]) # if self.compare_func(key_score, best_score): @@ -176,6 +178,7 @@ def _do_evaluate(self, runner): if self.save_best and key_score: self._save_ckpt(runner, key_score) + # TODO obtain best_score from runner # best_score = runner.meta['hook_msgs'].get( # 'best_score', self.init_value_map[self.rule]) # if self.compare_func(key_score, best_score): diff --git a/det-mmdetection-tmi/mmdet/datasets/ymir.py b/det-mmdetection-tmi/mmdet/datasets/ymir.py index 1276310..9215624 100644 --- a/det-mmdetection-tmi/mmdet/datasets/ymir.py +++ b/det-mmdetection-tmi/mmdet/datasets/ymir.py @@ -2,8 +2,6 @@ # wangjiaxin 2022-04-25 import os.path as osp - -# from PIL import Image import imagesize import json @@ -58,10 +56,6 @@ def load_annotations(self, ann_file): for line in lines: # split any white space img_path, ann_path = line.strip().split() - img_path = osp.join(self.data_root, self.img_prefix, img_path) - ann_path = osp.join(self.data_root, self.ann_prefix, ann_path) - # img = Image.open(img_path) - # width, height = img.size width, height = imagesize.get(img_path) images.append( dict(id=image_counter, @@ -104,8 +98,7 @@ def load_annotations(self, ann_file): self.img_ids = self.coco.get_img_ids() # self.img_ids = list(self.coco.imgs.keys()) assert len(self.img_ids) > 0, 'image number must > 0' - N = len(self.img_ids) - print(f'load {N} image from YMIR dataset') + print(f'load {len(self.img_ids)} image from YMIR dataset') data_infos = [] total_ann_ids = [] @@ -136,10 +129,6 @@ def get_txt_ann_info(self, txt_path): Returns: dict: Annotation info of specified index. 
""" - - # img_id = self.data_infos[idx]['id'] - # txt_path = osp.splitext(img_path)[0]+'.txt' - # txt_path = self.get_ann_path_from_img_path(img_path) anns = [] if osp.exists(txt_path): with open(txt_path, 'r') as fp: @@ -150,13 +139,10 @@ def get_txt_ann_info(self, txt_path): obj = [int(x) for x in line.strip().split(',')[0:5]] # YMIR category id starts from 0, coco from 1 category_id, xmin, ymin, xmax, ymax = obj - bbox = [xmin, ymin, xmax, ymax] h, w = ymax-ymin, xmax-xmin ignore = 0 if self.min_size: assert not self.test_mode - w = bbox[2] - bbox[0] - h = bbox[3] - bbox[1] if w < self.min_size or h < self.min_size: ignore = 1 @@ -185,10 +171,7 @@ def get_cat_ids(self, idx): """ cat_ids = [] - # img_path = self.data_infos[idx]['file_name'] - # txt_path = self.get_ann_path_from_img_path(img_path) txt_path = self.data_infos[idx]['ann_path'] - txt_path = osp.join(self.data_root, self.ann_prefix, txt_path) if osp.exists(txt_path): with open(txt_path, 'r') as fp: lines = fp.readlines() diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 2c232e2..21bbc62 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -7,12 +7,13 @@ import os import os.path as osp from enum import IntEnum -from typing import Any, List +from typing import Any, List, Optional import mmcv from easydict import EasyDict as edict from mmcv import Config from nptyping import NDArray, Shape, UInt8 +from packaging.version import Version from ymir_exc import env from ymir_exc import result_writer as rw @@ -27,7 +28,13 @@ class YmirStage(IntEnum): CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def get_ymir_process(stage: YmirStage, p: float = 0.0) -> float: +def get_ymir_process(stage: YmirStage, p: float, task_idx: int = 0, task_num: int = 1) -> float: + """ + stage: pre-process/task/post-process + p: percent for stage + task_idx: index for multiple tasks like mining (task_idx=0) and infer (task_idx=1) + task_num: the total number of multiple tasks. 
+ """ # const value for ymir process PREPROCESS_PERCENT = 0.1 TASK_PERCENT = 0.8 @@ -36,12 +43,14 @@ def get_ymir_process(stage: YmirStage, p: float = 0.0) -> float: if p < 0 or p > 1.0: raise Exception(f'p not in [0,1], p={p}') + init = task_idx * 1.0 / task_num + ratio = 1.0 / task_num if stage == YmirStage.PREPROCESS: - return PREPROCESS_PERCENT * p + return init + PREPROCESS_PERCENT * p * ratio elif stage == YmirStage.TASK: - return PREPROCESS_PERCENT + TASK_PERCENT * p + return init + (PREPROCESS_PERCENT + TASK_PERCENT * p) * ratio elif stage == YmirStage.POSTPROCESS: - return PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p + return init + (PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p) * ratio else: raise NotImplementedError(f'unknown stage {stage}') @@ -158,7 +167,15 @@ def get_weight_file(cfg: edict) -> str: return "" -def write_ymir_training_result(last: bool = False, key_score=None): +def write_ymir_training_result(last: bool = False, key_score: Optional[float] = None): + YMIR_VERSION = os.environ.get('YMIR_VERSION', '1.2.0') + if Version(YMIR_VERSION) >= Version('1.2.0'): + write_latest_ymir_training_result(last, key_score) + else: + write_ancient_ymir_training_result(key_score) + + +def write_latest_ymir_training_result(last: bool = False, key_score: Optional[float] = None): if key_score: logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') @@ -170,14 +187,14 @@ def write_ymir_training_result(last: bool = False, key_score=None): # eval_result may be empty dict {}. map = eval_result.get('bbox_mAP_50', 0) - work_dir = os.getenv('YMIR_MODELS_DIR') - if work_dir is None or not osp.isdir(work_dir): + WORK_DIR = os.getenv('YMIR_MODELS_DIR') + if WORK_DIR is None or not osp.isdir(WORK_DIR): raise Exception( - f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {work_dir}') + f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') # assert only one model config file in work_dir result_files = [osp.basename(f) for f in glob.glob( - osp.join(work_dir, '*')) if osp.basename(f) != 'result.yaml'] + osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] if last: # save all output file @@ -186,7 +203,7 @@ def write_ymir_training_result(last: bool = False, key_score=None): stage_name='last') else: # save newest weight file in format epoch_xxx.pth or iter_xxx.pth - weight_files = [osp.join(work_dir, f) for f in result_files if f.startswith( + weight_files = [osp.join(WORK_DIR, f) for f in result_files if f.startswith( ('iter_', 'epoch_')) and f.endswith('.pth')] if len(weight_files) > 0: @@ -194,7 +211,7 @@ def write_ymir_training_result(last: bool = False, key_score=None): max(weight_files, key=os.path.getctime)) stage_name = osp.splitext(newest_weight_file)[0] - training_result_file = osp.join(work_dir, 'result.yaml') + training_result_file = osp.join(WORK_DIR, 'result.yaml') if osp.exists(training_result_file): with open(training_result_file, 'r') as f: training_result = yaml.safe_load(f) @@ -207,3 +224,39 @@ def write_ymir_training_result(last: bool = False, key_score=None): rw.write_model_stage(files=[newest_weight_file] + config_files, mAP=float(map), stage_name=stage_name) + + +def write_ancient_ymir_training_result(key_score: Optional[float] = None): + if key_score: + logging.info(f'key_score is {key_score}') + + COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') + if COCO_EVAL_TMP_FILE is None: + raise Exception( + 'please set valid environment variable 
COCO_EVAL_TMP_FILE to write result into json file') + + eval_result = mmcv.load(COCO_EVAL_TMP_FILE) + # eval_result may be empty dict {}. + map = eval_result.get('bbox_mAP_50', 0) + + WORK_DIR = os.getenv('YMIR_MODELS_DIR') + if WORK_DIR is None or not osp.isdir(WORK_DIR): + raise Exception( + f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') + + # assert only one model config file in work_dir + result_files = [osp.basename(f) for f in glob.glob( + osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] + + training_result_file = osp.join(WORK_DIR, 'result.yaml') + if osp.exists(training_result_file): + with open(training_result_file, 'r') as f: + training_result = yaml.safe_load(f) + + training_result['model'] = result_files + training_result['map'] = max(map, training_result['map']) + else: + training_result = dict(model=result_files, map=map) + + with open(training_result_file, 'w') as f: + yaml.safe_dump(training_result, f) diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index e4b1398..12a6f9c 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -39,17 +39,12 @@ def _run_mining() -> None: command = 'python3 ymir_mining.py' logging.info(f'start mining: {command}') subprocess.run(command.split(), check=True) - - monitor.write_monitor_logger(percent=1.0) logging.info("mining finished") - def _run_infer() -> None: command = 'python3 ymir_infer.py' logging.info(f'start infer: {command}') subprocess.run(command.split(), check=True) - - monitor.write_monitor_logger(percent=1.0) logging.info("infer finished") diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index a56133d..37b2da9 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -1,3 +1,5 @@ +shm_size: '32G' +export_format: 'ark:raw' samples_per_gpu: 2 workers_per_gpu: 2 max_epochs: 300 diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index b4716e2..0530bf0 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -81,6 +81,18 @@ class YmirModel: def __init__(self, cfg: edict): self.cfg = cfg + if cfg.ymir.run_mining and cfg.ymir.run_infer: + # mining_task_idx = 0 + infer_task_idx = 1 + task_num = 2 + else: + # mining_task_idx = 0 + infer_task_idx = 0 + task_num = 1 + + self.task_idx=infer_task_idx + self.task_num=task_num + # Specify the path to model config and checkpoint file config_file = get_config_file(cfg) checkpoint_file = get_weight_file(cfg) @@ -120,11 +132,14 @@ def main(): idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) + percent = get_ymir_process( + stage=YmirStage.TASK, p=idx / N, task_idx=model.task_idx, task_num=model.task_num) monitor.write_monitor_logger(percent=percent) rw.write_infer_result(infer_result=infer_result) - + percent = get_ymir_process(stage=YmirStage.POSTPROCESS, + p=1, task_idx=model.task_idx, task_num=model.task_num) + monitor.write_monitor_logger(percent=percent) return 0 diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining.py index f4bea0c..0299edc 100644 --- a/det-mmdetection-tmi/ymir_mining.py +++ b/det-mmdetection-tmi/ymir_mining.py @@ -7,6 +7,7 @@ from typing import Any, Dict, List, Tuple import cv2 +from easydict import EasyDict as edict import numpy as np from nptyping import NDArray from scipy.stats import entropy @@ -238,6 +239,19 @@ def 
split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: class YmirMining(YmirModel): + def __init__(self, cfg: edict): + super().__init__(cfg) + if cfg.ymir.run_mining and cfg.ymir.run_infer: + mining_task_idx = 0 + # infer_task_idx = 1 + task_num = 2 + else: + mining_task_idx = 0 + # infer_task_idx = 0 + task_num = 1 + self.task_idx = mining_task_idx + self.task_num = task_num + def mining(self): N = dr.items_count(env.DatasetType.CANDIDATE) monitor_gap = max(1, N // 100) @@ -295,7 +309,8 @@ def mining(self): idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N) + percent = get_ymir_process( + stage=YmirStage.TASK, p=idx / N, task_idx=self.task_idx, task_num=self.task_num) monitor.write_monitor_logger(percent=percent) return mining_result @@ -350,6 +365,9 @@ def main(): mining_result = miner.mining() rw.write_mining_result(mining_result=mining_result) + percent = get_ymir_process(stage=YmirStage.POSTPROCESS, + p=1, task_idx=miner.task_idx, task_num=miner.task_num) + monitor.write_monitor_logger(percent=percent) return 0 From d59e3230c1903f97c5b6b3301f1330874b96cb86 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 19 Jul 2022 16:01:26 +0800 Subject: [PATCH 080/204] update comment --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 21bbc62..dd9b333 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -134,7 +134,7 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: interval = max(1, mmdet_cfg.runner.max_epochs//30) mmdet_cfg.evaluation.interval = interval mmdet_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') - # Whether to evaluating the AP for each class + # TODO Whether to evaluating the AP for each class # mmdet_cfg.evaluation.classwise = True return mmdet_cfg @@ -142,7 +142,7 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: def get_weight_file(cfg: edict) -> str: """ return the weight file path by priority - find weight file in cfg.param.model_params_path or cfg.param.model_params_path + find weight file in cfg.param.pretrained_model_params or cfg.param.model_params_path """ if cfg.ymir.run_training: model_params_path: List = cfg.param.pretrained_model_params From 685f2c766fe731b8b3ea1282e71b71bfd6d87ab1 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 19 Jul 2022 16:27:24 +0800 Subject: [PATCH 081/204] add default value --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index dd9b333..e8819b3 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -145,9 +145,9 @@ def get_weight_file(cfg: edict) -> str: find weight file in cfg.param.pretrained_model_params or cfg.param.model_params_path """ if cfg.ymir.run_training: - model_params_path: List = cfg.param.pretrained_model_params + model_params_path: List = cfg.param.get('pretrained_model_params', []) else: - model_params_path: List = cfg.param.model_params_path + model_params_path: List = cfg.param.get('model_params_path', []) model_dir = cfg.ymir.input.models_dir model_params_path = [ From 0c134eca39c56f95d65a6447a564a18456ad4db9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 20 
Jul 2022 19:33:53 +0800 Subject: [PATCH 082/204] update runtime --- det-mmdetection-tmi/requirements/runtime.txt | 1 + det-mmdetection-tmi/ymir_infer.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/det-mmdetection-tmi/requirements/runtime.txt b/det-mmdetection-tmi/requirements/runtime.txt index 9754131..cf0fac6 100644 --- a/det-mmdetection-tmi/requirements/runtime.txt +++ b/det-mmdetection-tmi/requirements/runtime.txt @@ -2,6 +2,7 @@ matplotlib numpy pycocotools six +scipy terminaltables easydict nptyping diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 0530bf0..ecec19e 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -60,9 +60,9 @@ def mmdet_result_to_ymir(results: List[DETECTION_RESULT], def get_config_file(cfg): if cfg.ymir.run_training: - model_params_path: List = cfg.param.pretrained_model_params + model_params_path: List = cfg.param.get('pretrained_model_params',[]) else: - model_params_path: List = cfg.param.model_params_path + model_params_path: List = cfg.param.get('model_params_path',[]) model_dir = cfg.ymir.input.models_dir config_files = [ From 384c75ebe5204834bdf7f7a034c697a67025442a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 20 Jul 2022 19:35:31 +0800 Subject: [PATCH 083/204] fix asnumpy error --- det-yolov4-mining/write_result.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/det-yolov4-mining/write_result.py b/det-yolov4-mining/write_result.py index 6ee21d9..ea3f19f 100644 --- a/det-yolov4-mining/write_result.py +++ b/det-yolov4-mining/write_result.py @@ -247,16 +247,12 @@ def _write_results(prediction, num_classes, input_dim, confidence=0.5, nms_conf= # """ # yolov3 alexey def _prep_results(load_images, img_batch, output, input_dim): - im_dim_list = nd.array([(x.shape[1], x.shape[0]) for x in load_images]) - im_dim_list = nd.tile(im_dim_list, 2) - im_dim_list = im_dim_list[output[:, 0], :] - scaling_factor = input_dim / im_dim_list - output[:, 3:7] /= scaling_factor + output = output.asnumpy() for i in range(output.shape[0]): - output[i, [3, 5]] = nd.clip(output[i, [3, 5]], a_min=0.0, a_max=im_dim_list[i][0].asscalar()) - output[i, [4, 6]] = nd.clip(output[i, [4, 6]], a_min=0.0, a_max=im_dim_list[i][1].asscalar()) + h, w = load_images[0].shape[0:2] + output[i, [3, 5]] = np.clip(output[i, [3, 5]]*w/input_dim, a_min=0.0, a_max=w) + output[i, [4, 6]] = np.clip(output[i, [4, 6]]*h/input_dim, a_min=0.0, a_max=h) - output = output.asnumpy() boxes = [] for i in range(len(load_images)): bboxs = [] From e903573020c4acdbf7dc4c1afd381e402ae7c76e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 22 Jul 2022 11:35:28 +0800 Subject: [PATCH 084/204] update result writer --- .gitignore | 4 - det-yolov5-tmi/Dockerfile | 64 ------ det-yolov5-tmi/README.md | 306 ++------------------------- det-yolov5-tmi/README_yolov5.md | 304 ++++++++++++++++++++++++++ det-yolov5-tmi/cuda102.dockerfile | 2 + det-yolov5-tmi/cuda111.dockerfile | 3 + det-yolov5-tmi/mining/mining_cald.py | 3 +- det-yolov5-tmi/start.py | 62 +++--- det-yolov5-tmi/train.py | 19 +- det-yolov5-tmi/utils/ymir_yolov5.py | 106 +++++----- 10 files changed, 412 insertions(+), 461 deletions(-) delete mode 100644 det-yolov5-tmi/Dockerfile create mode 100644 det-yolov5-tmi/README_yolov5.md diff --git a/.gitignore b/.gitignore index 6dbd818..5563689 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,3 @@ -# dockerfile for China -*.dockerfile.cn -det-mmdetection-tmi/docker/*.cn - *.png *.jpg 
*.img diff --git a/det-yolov5-tmi/Dockerfile b/det-yolov5-tmi/Dockerfile deleted file mode 100644 index 489dd04..0000000 --- a/det-yolov5-tmi/Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.10-py3 - -# Install linux packages -RUN apt update && apt install -y zip htop screen libgl1-mesa-glx - -# Install python dependencies -COPY requirements.txt . -RUN python -m pip install --upgrade pip -RUN pip uninstall -y torch torchvision torchtext -RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \ - torch==1.10.2+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html -# RUN pip install --no-cache -U torch torchvision - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -COPY . /usr/src/app - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ - -# Set environment variables -# ENV HOME=/usr/src/app - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t - -# Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t - -# Kill all -# sudo docker kill $(sudo docker ps -q) - -# Kill all image-based -# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) - -# Bash into running container -# sudo docker exec -it 5a9b5863d93d bash - -# Bash into stopped container -# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash - -# Clean up -# docker system prune -a --volumes - -# Update Ubuntu drivers -# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ - -# DDP test -# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 - -# GCP VM from Image -# docker.io/ultralytics/yolov5:latest diff --git a/det-yolov5-tmi/README.md b/det-yolov5-tmi/README.md index b03a7c5..fba577d 100644 --- a/det-yolov5-tmi/README.md +++ b/det-yolov5-tmi/README.md @@ -1,304 +1,30 @@ -

-[YOLOv5 banner and badges removed: CI CPU testing, YOLOv5 Citation, Docker Pulls, Open In Colab, Open In Kaggle, Join Forum]
+# yolov5-ymir readme +- [yolov5 readme](./README_yolov5.md) -
-

-YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

+## change log
-
+- add `start.py` as the train/infer/mining entrypoint
-
+- add `utils/ymir_yolov5.py` for useful functions
-
+  - `get_merged_config()` add ymir path config `cfg.yaml` and hyper-parameter `cfg.param`
-## Documentation
+  - `convert_ymir_to_yolov5()` generate yolov5 dataset config file `data.yaml`
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
+  - `write_ymir_training_result()` save model weight, map and other files.
-## Quick Start Examples
+ - `get_weight_file()` get pretrained weight or init weight file from ymir system -
-Install +- modify `utils/datasets.py` for ymir dataset format -Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a -[**Python>=3.7.0**](https://www.python.org/) environment, including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). +- modify `train.py` for training process monitor -```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install -``` +- add `mining/data_augment.py` and `mining/mining_cald.py` for mining -
+- add `training/infer/mining-template.yaml` for `/img-man/training/infer/mining-template.yaml` -
-Inference
+- add `cuda102/111.dockerfile`, remove the original `Dockerfile`
-Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)
-. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
+- modify `requirements.txt`
-```python
-import torch
-
-# Model
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # or yolov5m, yolov5l, yolov5x, custom
-
-# Images
-img = 'https://ultralytics.com/images/zidane.jpg'  # or file, Path, PIL, OpenCV, numpy, list
-
-# Inference
-results = model(img)
-
-# Results
-results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
-```
-
- - - -
-Inference with detect.py - -`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from -the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. - -```bash -python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream -``` - -
- -
-Training - -The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) -results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) -and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the -largest `--batch-size` possible, or pass `--batch-size -1` for -YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. - -```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - - - -
- -
-Tutorials - -* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ - RECOMMENDED -* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW -* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW -* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 -* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW -* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) - -
-
-## Environments
-
-Get started in seconds with our verified environments. Click each icon below for details.
-
-## Integrations
-
-|Weights and Biases|Roboflow ⭐ NEW|
-|:-:|:-:|
-|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
-
-## Why YOLOv5
- -

-[YOLOv5-P5 640 Figure (click to expand): speed/accuracy chart image removed]
- Figure Notes (click to expand) - -* **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. -* **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. -* **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8. -* **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` -
-
-### Pretrained Checkpoints
-
-[assets]: https://github.com/ultralytics/yolov5/releases
-
-[TTA]: https://github.com/ultralytics/yolov5/issues/303
-
-|Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs<br>@640 (B)
-|--- |--- |--- |--- |--- |--- |--- |--- |---
-|[YOLOv5n][assets] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
-|[YOLOv5s][assets] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
-|[YOLOv5m][assets] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
-|[YOLOv5l][assets] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
-|[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
-| | | | | | | | |
-|[YOLOv5n6][assets] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
-|[YOLOv5s6][assets] |1280 |44.8 |63.7 |385 |8.2 |3.6 |16.8 |12.6
-|[YOLOv5m6][assets] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
-|[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
-|[YOLOv5x6][assets]<br>+ [TTA][TTA] |1280<br>1536 |55.0<br>**55.8** |72.7<br>**72.7** |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
-
-Table Notes (click to expand)
-
-* All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
-* **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-* **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-* **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-
-## Contribute
-
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
-
-## Contact
- -For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or -professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). - -
-
-
+- other changes to support onnx export; not important.
diff --git a/det-yolov5-tmi/README_yolov5.md b/det-yolov5-tmi/README_yolov5.md
new file mode 100644
index 0000000..b03a7c5
--- /dev/null
+++ b/det-yolov5-tmi/README_yolov5.md
@@ -0,0 +1,304 @@
+
+

+[YOLOv5 banner and badges removed: CI CPU testing, YOLOv5 Citation, Docker Pulls, Open In Colab, Open In Kaggle, Join Forum]
+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ + + + + +
+
+## Documentation
+
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
+
+## Quick Start Examples
+ +
+Install + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a +[**Python>=3.7.0**](https://www.python.org/) environment, including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +
+ +
+Inference + +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) +. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```python +import torch + +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom + +# Images +img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
+ + + +
+Inference with detect.py + +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. + +```bash +python detect.py --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` + +
+ +
+Training + +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +largest `--batch-size` possible, or pass `--batch-size -1` for +YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. + +```bash +python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
+ +
+Tutorials + +* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED +* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ + RECOMMENDED +* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW +* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW +* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) +* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW +* [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 +* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) +* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) +* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) +* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) +* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW +* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) + +
+
+## Environments
+
+Get started in seconds with our verified environments. Click each icon below for details.
+
+## Integrations
+
+|Weights and Biases|Roboflow ⭐ NEW|
+|:-:|:-:|
+|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
+
+## Why YOLOv5
+ +

+[YOLOv5-P5 640 Figure (click to expand): speed/accuracy chart image removed]
+ Figure Notes (click to expand) + +* **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. +* **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. +* **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8. +* **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +
+
+### Pretrained Checkpoints
+
+[assets]: https://github.com/ultralytics/yolov5/releases
+
+[TTA]: https://github.com/ultralytics/yolov5/issues/303
+
+|Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs<br>@640 (B)
+|--- |--- |--- |--- |--- |--- |--- |--- |---
+|[YOLOv5n][assets] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
+|[YOLOv5s][assets] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
+|[YOLOv5m][assets] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
+|[YOLOv5l][assets] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
+|[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
+| | | | | | | | |
+|[YOLOv5n6][assets] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
+|[YOLOv5s6][assets] |1280 |44.8 |63.7 |385 |8.2 |3.6 |16.8 |12.6
+|[YOLOv5m6][assets] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
+|[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
+|[YOLOv5x6][assets]<br>+ [TTA][TTA] |1280<br>1536 |55.0<br>**55.8** |72.7<br>**72.7** |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
+
+
+Table Notes (click to expand)
+
+* All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
+* **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
+* **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+* **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+
+
+## Contribute
+
+We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
+
+## Contact
+ +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or +professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). + +
+ + diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile index bd7fd97..031859d 100644 --- a/det-yolov5-tmi/cuda102.dockerfile +++ b/det-yolov5-tmi/cuda102.dockerfile @@ -3,7 +3,9 @@ ARG CUDA="10.2" ARG CUDNN="7" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +# support SERVER_MODE=dev or prod ARG SERVER_MODE=prod +# support YMIR=1.0.0, 1.1.0 or 1.2.0 ARG YMIR="1.1.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile index f0ab4cc..c238bd5 100644 --- a/det-yolov5-tmi/cuda111.dockerfile +++ b/det-yolov5-tmi/cuda111.dockerfile @@ -4,9 +4,12 @@ ARG CUDNN="8" # cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!! FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +# support SERVER_MODE=dev or prod ARG SERVER_MODE=prod +# support YMIR=1.0.0, 1.1.0 or 1.2.0 ARG YMIR="1.1.0" + ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py index ba0f825..0fde401 100644 --- a/det-yolov5-tmi/mining/mining_cald.py +++ b/det-yolov5-tmi/mining/mining_cald.py @@ -37,12 +37,11 @@ def __init__(self, cfg: edict): super().__init__(cfg) if cfg.ymir.run_mining and cfg.ymir.run_infer: + # multiple task, run mining first, infer later mining_task_idx = 0 - # infer_task_idx = 1 task_num = 2 else: mining_task_idx = 0 - # infer_task_idx = 0 task_num = 1 self.task_idx = mining_task_idx diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 61c4dbe..4f0648f 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -13,7 +13,7 @@ from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, download_weight_file, get_merged_config, - get_weight_file, get_ymir_process) + get_weight_file, get_ymir_process, write_ymir_training_result) def start() -> int: @@ -25,6 +25,7 @@ def start() -> int: _run_training(cfg) else: if cfg.ymir.run_mining and cfg.ymir.run_infer: + # multiple task, run mining first, infer later mining_task_idx = 0 infer_task_idx = 1 task_num = 2 @@ -59,12 +60,20 @@ def _run_training(cfg: edict) -> None: batch_size = cfg.param.batch_size model = cfg.param.model img_size = cfg.param.img_size - save_period = cfg.param.save_period + save_period = max(1, min(epochs // 10, int(cfg.param.save_period))) args_options = cfg.param.args_options gpu_id = str(cfg.param.gpu_id) gpu_count = len(gpu_id.split(',')) if gpu_id else 0 port = int(cfg.param.get('port', 29500)) sync_bn = cfg.param.get('sync_bn', False) + if isinstance(sync_bn, str): + if sync_bn.lower() in ['f', 'false']: + sync_bn = False + elif sync_bn.lower() in ['t', 'true']: + sync_bn = True + else: + raise Exception(f'unknown bool str sync_bn = {sync_bn}') + weights = get_weight_file(cfg) if not weights: # download pretrained weight @@ -72,38 +81,35 @@ def _run_training(cfg: edict) -> None: models_dir = cfg.ymir.output.models_dir + commands = ['python3'] if gpu_count == 0: - command = f'python3 train.py --epochs {epochs} ' + \ - f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ - f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ - f'--img-size {img_size} ' + \ - f'--save-period {save_period} ' + \ - f'--device cpu' + device = 'cpu' elif gpu_count == 1: - command = f'python3 train.py --epochs {epochs} ' + \ - f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out 
' + \ - f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ - f'--img-size {img_size} ' + \ - f'--save-period {save_period} ' + \ - f'--device {gpu_id}' + device = gpu_id else: - command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} ' + \ - f'--master_port {port} train.py --epochs {epochs} ' + \ - f'--batch-size {batch_size} --data {out_dir}/data.yaml --project /out ' + \ - f'--cfg models/{model}.yaml --name models --weights {weights} ' + \ - f'--img-size {img_size} ' + \ - f'--save-period {save_period} ' + \ - f'--device {gpu_id}' - - if sync_bn: - command += " --sync-bn" + device = gpu_id + commands += f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split() + + commands += ['train.py', + '--epochs', str(epochs), + '--batch-size', str(batch_size), + '--data', f'{out_dir}/data.yaml', + '--project', '/out', + '--cfg', f'models/{model}.yaml', + '--name', 'models', '--weights', weights, + '--img-size', str(img_size), + '--save-period', str(save_period), + '--device', device] + + if gpu_count > 1 and sync_bn: + commands.append("--sync-bn") if args_options: - command += f" {args_options}" + commands += args_options.split() - logging.info(f'start training: {command}') + logging.info(f'start training: {commands}') - subprocess.run(command.split(), check=True) + subprocess.run(commands, check=True) monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) # 3. convert to onnx and save model weight to design directory @@ -114,7 +120,7 @@ def _run_training(cfg: edict) -> None: # save hyperparameter shutil.copy(f'models/{model}.yaml', f'{models_dir}/{model}.yaml') - + write_ymir_training_result(cfg) # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index c42098b..513c25b 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -24,7 +24,6 @@ from pathlib import Path import numpy as np -from packaging.version import Version import torch import torch.distributed as dist import torch.nn as nn @@ -58,7 +57,7 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first -from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config, write_old_ymir_training_result +from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -77,12 +76,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt.ymir_cfg = '' # yaml cannot dump edict, remove it here log_dir = Path(ymir_cfg.ymir.output.tensorboard_dir) - YMIR_VERSION = os.environ.get('YMIR_VERSION', '1.2.0') - if Version(YMIR_VERSION) >= Version('1.2.0'): - latest_ymir = True - else: - latest_ymir = False - # Directories w = save_dir # weights dir (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir @@ -425,10 +418,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): torch.save(ckpt, w / f'epoch{epoch}.pt') weight_file = str(w / f'epoch{epoch}.pt') - if latest_ymir: - write_ymir_training_result(ymir_cfg, map50=results[2], epoch=epoch, weight_file=weight_file) - else: - 
write_old_ymir_training_result(ymir_cfg, results, maps, rewrite=True) + write_ymir_training_result(ymir_cfg, map50=results[2], epoch=epoch, weight_file=weight_file) del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) @@ -477,10 +467,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear torch.cuda.empty_cache() # save the best and last weight file with other files in models_dir if RANK in [-1, 0]: - if latest_ymir: - write_ymir_training_result(ymir_cfg, map50=best_fitness, epoch=epochs, weight_file='') - else: - write_old_ymir_training_result(ymir_cfg, (), np.array([0]), rewrite=False) + write_ymir_training_result(ymir_cfg, map50=best_fitness, epoch=epochs, weight_file='') return results diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index aa80a72..6f16c2c 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -2,16 +2,18 @@ utils function for ymir and yolov5 """ import glob +import os import os.path as osp import shutil from enum import IntEnum from typing import Any, Dict, List, Tuple +from easydict import EasyDict as edict import numpy as np import torch import yaml -from easydict import EasyDict as edict from nptyping import NDArray, Shape, UInt8 +from packaging.version import Version from ymir_exc import env from ymir_exc import result_writer as rw @@ -32,7 +34,7 @@ class YmirStage(IntEnum): CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def get_ymir_process(stage: YmirStage, p: float, task_idx: int=0, task_num: int=1) -> float: +def get_ymir_process(stage: YmirStage, p: float, task_idx: int = 0, task_num: int = 1) -> float: """ stage: pre-process/task/post-process p: percent for stage @@ -47,8 +49,9 @@ def get_ymir_process(stage: YmirStage, p: float, task_idx: int=0, task_num: int= if p < 0 or p > 1.0: raise Exception(f'p not in [0,1], p={p}') - init = task_idx * 1.0 / task_num ratio = 1.0 / task_num + init = task_idx / task_num + if stage == YmirStage.PREPROCESS: return init + PREPROCESS_PERCENT * p * ratio elif stage == YmirStage.TASK: @@ -110,16 +113,15 @@ class YmirYolov5(): def __init__(self, cfg: edict): self.cfg = cfg if cfg.ymir.run_mining and cfg.ymir.run_infer: - # mining_task_idx = 0 + # multiple task, run mining first, infer later infer_task_idx = 1 task_num = 2 else: - # mining_task_idx = 0 infer_task_idx = 0 task_num = 1 - self.task_idx=infer_task_idx - self.task_num=task_num + self.task_idx = infer_task_idx + self.task_num = task_num device = select_device(cfg.param.get('gpu_id', 'cpu')) @@ -225,15 +227,30 @@ def convert_ymir_to_yolov5(cfg: edict) -> None: def write_ymir_training_result(cfg: edict, - map50: float, - epoch: int, - weight_file: str) -> int: + map50: float = 0.0, + epoch: int = 0, + weight_file: str = "") -> int: + YMIR_VERSION = os.getenv('YMIR_VERSION', '1.2.0') + if Version(YMIR_VERSION) >= Version('1.2.0'): + write_latest_ymir_training_result(cfg, map50, epoch, weight_file) + else: + write_ancient_ymir_training_result(cfg, map50) + + +def write_latest_ymir_training_result(cfg: edict, + map50: float, + epoch: int, + weight_file: str) -> int: """ for ymir>=1.2.0 cfg: ymir config map50: map50 epoch: stage weight_file: saved weight files, empty weight_file will save all files + + 1. save weight file for each epoch. + 2. save weight file for last.pt, best.pt and other config file + 3. 
save weight file for best.onnx, no valid map50, attach to stage f"{model}_last_and_best" """ model = cfg.param.model # use `rw.write_training_result` to save training result @@ -246,63 +263,38 @@ def write_ymir_training_result(cfg: edict, files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*')) if not f.endswith('.pt')] + ['last.pt', 'best.pt'] + training_result_file = cfg.ymir.output.training_result_file + if osp.exists(training_result_file): + with open(cfg.ymir.output.training_result_file, 'r') as f: + training_result = yaml.safe_load(stream=f) + + map50 = max(training_result.get('map',0.0), map50) rw.write_model_stage(stage_name=f"{model}_last_and_best", files=files, mAP=float(map50)) return 0 -def write_training_result(model: List[str], map: float, class_aps: Dict[str, float], **kwargs: dict) -> None: +def write_ancient_ymir_training_result(cfg: edict, map50: float) -> None: """ for 1.0.0 <= ymir <=1.1.0 """ - training_result = { - 'model': model, - 'map': map, - 'class_aps': class_aps, - } - training_result.update(kwargs) + + files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))] + training_result_file = cfg.ymir.output.training_result_file + if osp.exists(training_result_file): + with open(cfg.ymir.output.training_result_file, 'r') as f: + training_result = yaml.safe_load(stream=f) + + training_result['model'] = files + training_result['map'] = max(training_result.get('map', 0), map50) + else: + training_result = { + 'model': files, + 'map': map50, + 'stage_name': f'{cfg.param.model}' + } env_config = env.get_current_env() with open(env_config.output.training_result_file, 'w') as f: yaml.safe_dump(training_result, f) - - -def write_old_ymir_training_result(cfg: edict, results: Tuple, maps: NDArray, rewrite=False) -> int: - """ - for 1.0.0 <= ymir <=1.1.0 - cfg: ymir config - results: (mp, mr, map50, map, loss) - maps: map@0.5:0.95 for all classes - rewrite: set true to ensure write the best result - """ - - if not rewrite: - training_result_file = cfg.ymir.output.training_result_file - if osp.exists(training_result_file): - with open(cfg.ymir.output.training_result_file, 'r') as f: - training_result = yaml.safe_load(stream=f) - - files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))] - - training_result['model_names'] = files + ['best.onnx'] - write_training_result(**training_result) - - return 0 - - class_names = cfg.param.class_names - mp = results[0] # mean of precision - mr = results[1] # mean of recall - map50 = results[2] # mean of ap@0.5 - map = results[3] # mean of ap@0.5:0.95 - - files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))] - # use `rw.write_training_result` to save training result - write_training_result(model=files + ['best.onnx'], - map=float(map), - map50=float(map50), - precision=float(mp), - recall=float(mr), - class_aps={class_name: v - for class_name, v in zip(class_names, maps.tolist())}) - return 0 From 4e36ca567792b147dcef91e0231d3c26ca5d3419 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 22 Jul 2022 11:55:04 +0800 Subject: [PATCH 085/204] remove s --- det-yolov5-tmi/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 513c25b..d28fdb8 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -426,7 +426,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear if RANK == -1 and stopper(epoch=epoch, 
fitness=fi): break - # Stop DDP TODO: known issues shttps://github.com/ultralytics/yolov5/pull/4576 + # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576 # stop = stopper(epoch=epoch, fitness=fi) # if RANK == 0: # dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks From 7410409658c58744e9282ce8613dbf539806e55d Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 22 Jul 2022 12:10:23 +0800 Subject: [PATCH 086/204] use _ to help coder --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 11 ++++++----- det-mmdetection-tmi/tools/train.py | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index e8819b3..aac1df8 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -68,8 +68,9 @@ def get_merged_config() -> edict: return merged_cfg -def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: +def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: """ + useful for training process - modify dataset config - modify model output channel - modify epochs, checkpoint, tensorboard config @@ -170,12 +171,12 @@ def get_weight_file(cfg: edict) -> str: def write_ymir_training_result(last: bool = False, key_score: Optional[float] = None): YMIR_VERSION = os.environ.get('YMIR_VERSION', '1.2.0') if Version(YMIR_VERSION) >= Version('1.2.0'): - write_latest_ymir_training_result(last, key_score) + _write_latest_ymir_training_result(last, key_score) else: - write_ancient_ymir_training_result(key_score) + _write_ancient_ymir_training_result(key_score) -def write_latest_ymir_training_result(last: bool = False, key_score: Optional[float] = None): +def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[float] = None): if key_score: logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') @@ -226,7 +227,7 @@ def write_latest_ymir_training_result(last: bool = False, key_score: Optional[fl stage_name=stage_name) -def write_ancient_ymir_training_result(key_score: Optional[float] = None): +def _write_ancient_ymir_training_result(key_score: Optional[float] = None): if key_score: logging.info(f'key_score is {key_score}') diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index 74121ff..b3b6d65 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -17,7 +17,7 @@ from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger, setup_multi_processes -from mmdet.utils.util_ymir import modify_mmdet_config, get_merged_config +from mmdet.utils.util_ymir import _modify_mmdet_config, get_merged_config def parse_args(): @@ -101,7 +101,7 @@ def main(): cfg = Config.fromfile(args.config) print(cfg) # modify mmdet config from file - cfg = modify_mmdet_config(mmdet_cfg=cfg, ymir_cfg=ymir_cfg) + cfg = _modify_mmdet_config(mmdet_cfg=cfg, ymir_cfg=ymir_cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) From 7c667d5778862de3f33e111f303a85adabd6aaa6 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 22 Jul 2022 12:11:59 +0800 Subject: [PATCH 087/204] use _ to help user --- det-yolov5-tmi/utils/ymir_yolov5.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 6f16c2c..be78660 100644 
--- a/det-yolov5-tmi/utils/ymir_yolov5.py
+++ b/det-yolov5-tmi/utils/ymir_yolov5.py
@@ -232,12 +232,12 @@ def write_ymir_training_result(cfg: edict,
                                weight_file: str = "") -> int:
     YMIR_VERSION = os.getenv('YMIR_VERSION', '1.2.0')
     if Version(YMIR_VERSION) >= Version('1.2.0'):
-        write_latest_ymir_training_result(cfg, map50, epoch, weight_file)
+        _write_latest_ymir_training_result(cfg, map50, epoch, weight_file)
     else:
-        write_ancient_ymir_training_result(cfg, map50)
+        _write_ancient_ymir_training_result(cfg, map50)


-def write_latest_ymir_training_result(cfg: edict,
+def _write_latest_ymir_training_result(cfg: edict,
                                        map50: float,
                                        epoch: int,
                                        weight_file: str) -> int:
@@ -275,7 +275,7 @@ def write_latest_ymir_training_result(cfg: edict,
     return 0


-def write_ancient_ymir_training_result(cfg: edict, map50: float) -> None:
+def _write_ancient_ymir_training_result(cfg: edict, map50: float) -> None:
     """
     for 1.0.0 <= ymir <= 1.1.0
     """

From 9a2c5449ce3f93f11ba312442a25111286efee72 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 27 Jul 2022 14:34:04 +0800
Subject: [PATCH 088/204] update master

---
 README.MD | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/README.MD b/README.MD
index 3618622..72e60b0 100644
--- a/README.MD
+++ b/README.MD
@@ -1,5 +1,13 @@
 # ymir-executor user guide

+## ymir-1.0.0 official image
+
+- yolov4
+
+- yolov5
+
+- mmdetection
+
 ## det-yolov4-training

 - yolov4 training image, based on mxnet and darknet. The default `Dockerfile` uses cuda `10.1` and cannot run directly on newer GPUs such as GTX3080/GTX3090; edit the dockerfile to raise the cuda version to 11.1 or above, using `cuda112.dockerfile` as a reference.

From 87e18b3d50f63720964f411a3dca58107be99937 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 28 Jul 2022 15:08:57 +0800
Subject: [PATCH 089/204] update

---
 README.MD                           |  25 +-
 det-mmdetection-tmi/README.md       | 342 ++--------------------------
 det-mmdetection-tmi/README_mmdet.md | 329 ++++++++++++++++++++++++++
 det-mmdetection-tmi/README_ymir.md  |  25 --
 det-yolov5-tmi/README.md            |   6 +
 5 files changed, 373 insertions(+), 354 deletions(-)
 create mode 100644 det-mmdetection-tmi/README_mmdet.md
 delete mode 100644 det-mmdetection-tmi/README_ymir.md

diff --git a/README.MD b/README.MD
index 72e60b0..3d7f8e4 100644
--- a/README.MD
+++ b/README.MD
@@ -1,6 +1,8 @@
 # ymir-executor user guide

+- [ymir](https://github.com/IndustryEssentials/ymir)
+
 ## ymir-1.1.0 official image

 - yolov4

 - yolov5

 - mmdetection

+- [detectron2](https://github.com/yzbx/ymir-detectron2)
+
+  - the ymir1.0.0 image is compatible with ymir1.1.0
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.0.0-detectron2-tmi
+  ```
+
 ## det-yolov4-training

-- yolov4 training image, based on mxnet and darknet. The default `Dockerfile` uses cuda `10.1` and cannot run directly on newer GPUs such as GTX3080/GTX3090; edit the dockerfile to raise the cuda version to 11.1 or above, using `cuda112.dockerfile` as a reference.
+- yolov4 training image, based on mxnet and darknet. The default `Dockerfile` uses cuda `10.1` and cannot run on newer GPUs such as GTX3080/GTX3090; edit the dockerfile to raise the cuda version to 11.1 or above, using `cuda112.dockerfile` as a reference.

 ```
 cd det-yolov4-training

 docker build -t ymir-executor/yolov4:cuda111-training -f cuda111.dockerfile .

 docker build -t ymir-executor/yolov4:cuda112-training -f cuda112.dockerfile .
 ```

 ## det-yolov4-mining

 - yolov4 mining and infer image

 ```
 cd det-yolov4-mining

 docker build -t ymir-executor/yolov4:cuda112-mi -f cuda112.dockerfile .
 ```

 ## det-yolov5-tmi

-- [change notes](./det-yolov5-tmi/README_yolov5.md)
+- [change log](./det-yolov5-tmi/README.md)

 - yolov5 training/mining/infer image. The docker build downloads pretrained weights from github; if github access is unstable, download the weights beforehand and copy them into the image at build time.
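A minimal sketch of fetching a weight file ahead of `docker build` so it can be copied into the image (illustration only; the release tag and file name below are assumptions, so pick the asset matching your yolov5 version):

```python
import torch.hub

# assumed release asset; adjust the tag/file to the yolov5 version you build
URL = 'https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt'
torch.hub.download_url_to_file(URL, 'yolov5s.pt')  # then COPY yolov5s.pt in the dockerfile
```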
@@ -62,7 +72,7 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## det-mmdetection-tmi -- [修改说明](./det-mmdetection-tmi/README_ymir.md) +- [change log](./det-mmdetection-tmi/README.md) ``` cd det-mmdetection-tmi @@ -71,11 +81,12 @@ docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi -f docker/Doc docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi -f docker/Dockerfile.cuda111 --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 . ``` - ## 如何制作自己的ymir-executor - [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) +- [ymir-executor-sdk](https://github.com/yzbx/ymir-executor-sdk) ymir镜像开发辅助库 + ## 如何导入预训练模型 - [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md) @@ -101,9 +112,9 @@ docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi -f docker/Doc - 回到项目根目录或docker file对应根目录,确保docker file 中`COPY/ADD`的文件与文件夹能够访问,以yolov5为例. ``` - cd ymir-executor + cd ymir-executor/det-yolov5-tmi - docker build -t ymir-executor/yolov5 . -f det-yolov5-tmi/cuda111.dockerfile + docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile --build-arg SERVER_MODE=dev ``` ## 镜像运行完`/in`与`/out`目录中的文件被清理 diff --git a/det-mmdetection-tmi/README.md b/det-mmdetection-tmi/README.md index c1d63cc..b2ed690 100644 --- a/det-mmdetection-tmi/README.md +++ b/det-mmdetection-tmi/README.md @@ -1,329 +1,27 @@ -
-[stripped HTML banner: OpenMMLab logo with "OpenMMLab website" (HOT) and "OpenMMLab platform" (TRY IT OUT) links]
+# det-mmdetection-tmi -[![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet) -[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/) -[![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions) -[![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection) -[![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE) -[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues) +- [mmdetection](./README_mmdet.md) - +`mmdetection` framework for object `det`ection `t`raining/`m`ining/`i`nfer task -[📘Documentation](https://mmdetection.readthedocs.io/en/v2.21.0/) | -[🛠️Installation](https://mmdetection.readthedocs.io/en/v2.21.0/get_started.html) | -[👀Model Zoo](https://mmdetection.readthedocs.io/en/v2.21.0/model_zoo.html) | -[🆕Update News](https://mmdetection.readthedocs.io/en/v2.21.0/changelog.html) | -[🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) | -[🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose) - -
- -## Introduction - -English | [简体中文](README_zh-CN.md) - -MMDetection is an open source object detection toolbox based on PyTorch. It is -a part of the [OpenMMLab](https://openmmlab.com/) project. - -The master branch works with **PyTorch 1.5+**. - -
-Major features - -- **Modular Design** - - We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules. - -- **Support of multiple frameworks out of box** - - The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc. - -- **High efficiency** - - All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet). - -- **State of the art** - - The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward. - -
- -Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox. - -## License - -This project is released under the [Apache 2.0 license](LICENSE). - -## Changelog - -**2.22.0** was released in 24/2/2022: - -- Support [MaskFormer](configs/maskformer), [DyHead](configs/dyhead), [OpenImages Dataset](configs/openimages) and [TIMM backbone](configs/timm_example) -- Support visualization for Panoptic Segmentation -- Release a good recipe of using ResNet in object detectors pre-trained by [ResNet Strikes Back](https://arxiv.org/abs/2110.00476), which consistently brings about 3~4 mAP improvements over RetinaNet, Faster/Mask/Cascade Mask R-CNN - -Please refer to [changelog.md](docs/en/changelog.md) for details and release history. - -For compatibility changes between different versions of MMDetection, please refer to [compatibility.md](docs/en/compatibility.md). - -## Overview of Benchmark and Model Zoo - -Results and models are available in the [model zoo](docs/en/model_zoo.md). - -
-[stripped HTML table "Architectures": columns Object Detection, Instance Segmentation, Panoptic Segmentation, Other (incl. Contrastive Learning, Distillation)]
-[stripped HTML table "Components": columns Backbones, Necks, Loss, Common]
    - -Some other methods are also supported in [projects using MMDetection](./docs/en/projects.md). - -## Installation - -Please refer to [get_started.md](docs/en/get_started.md) for installation. - -## Getting Started - -Please see [get_started.md](docs/en/get_started.md) for the basic usage of MMDetection. -We provide [colab tutorial](demo/MMDet_Tutorial.ipynb), and full guidance for quick run [with existing dataset](docs/en/1_exist_data_model.md) and [with new dataset](docs/en/2_new_data_model.md) for beginners. -There are also tutorials for [finetuning models](docs/en/tutorials/finetune.md), [adding new dataset](docs/en/tutorials/customize_dataset.md), [designing data pipeline](docs/en/tutorials/data_pipeline.md), [customizing models](docs/en/tutorials/customize_models.md), [customizing runtime settings](docs/en/tutorials/customize_runtime.md) and [useful tools](docs/en/useful_tools.md). - -Please refer to [FAQ](docs/en/faq.md) for frequently asked questions. - -## Contributing - -We appreciate all contributions to improve MMDetection. Ongoing projects can be found in out [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects). Welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline. - -## Acknowledgement - -MMDetection is an open source project that is contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedbacks. -We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors. - -## Citation - -If you use this toolbox or benchmark in your research, please cite this project. +# build docker image ``` -@article{mmdetection, - title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark}, - author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and - Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and - Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and - Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and - Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong - and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua}, - journal= {arXiv preprint arXiv:1906.07155}, - year={2019} -} -``` +docker build -t ymir-executor/mmdet:cuda102-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f docker/Dockerfile.cuda102 . -## Projects in OpenMMLab +docker build -t ymir-executor/mmdet:cuda111-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f docker/Dockerfile.cuda111 . +``` -- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. -- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. -- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. -- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. -- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. -- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. 
-- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. -- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. -- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. -- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. -- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. -- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. -- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. -- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. -- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. -- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. -- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. -- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. -- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. +# changelog +- modify `mmdet/datasets/coco.py`, save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` with json format +- modify `mmdet/core/evaluation/eval_hooks.py`, write training result file and monitor task process +- modify `mmdet/datasets/__init__.py, mmdet/datasets/coco.py` and add `mmdet/datasets/ymir.py`, add class `YmirDataset` to load YMIR dataset. +- modify `requirements/runtime.txt` to add new dependent package. +- add `mmdet/utils/util_ymir.py` for ymir training/infer/mining +- add `ymir_infer.py` for infer +- add `ymir_mining.py` for mining +- add `ymir_train.py` modify `tools/train.py` to update the mmcv config for training +- add `start.py`, the entrypoint for docker image +- add `training-template.yaml, infer-template.yaml, mining-template.yaml` for ymir pre-defined hyper-parameters. +- add `docker/Dockerfile.cuda102, docker/Dockerfile.cuda111` to build docker image +- remove `docker/Dockerfile` to avoid misuse diff --git a/det-mmdetection-tmi/README_mmdet.md b/det-mmdetection-tmi/README_mmdet.md new file mode 100644 index 0000000..c1d63cc --- /dev/null +++ b/det-mmdetection-tmi/README_mmdet.md @@ -0,0 +1,329 @@ +
+[stripped HTML banner: OpenMMLab logo with "OpenMMLab website" (HOT) and "OpenMMLab platform" (TRY IT OUT) links]
    + +[![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/) +[![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection) +[![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues) + + + +[📘Documentation](https://mmdetection.readthedocs.io/en/v2.21.0/) | +[🛠️Installation](https://mmdetection.readthedocs.io/en/v2.21.0/get_started.html) | +[👀Model Zoo](https://mmdetection.readthedocs.io/en/v2.21.0/model_zoo.html) | +[🆕Update News](https://mmdetection.readthedocs.io/en/v2.21.0/changelog.html) | +[🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) | +[🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose) + +
    + +## Introduction + +English | [简体中文](README_zh-CN.md) + +MMDetection is an open source object detection toolbox based on PyTorch. It is +a part of the [OpenMMLab](https://openmmlab.com/) project. + +The master branch works with **PyTorch 1.5+**. + +
    +Major features + +- **Modular Design** + + We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules. + +- **Support of multiple frameworks out of box** + + The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc. + +- **High efficiency** + + All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet). + +- **State of the art** + + The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward. + +
    + +Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox. + +## License + +This project is released under the [Apache 2.0 license](LICENSE). + +## Changelog + +**2.22.0** was released in 24/2/2022: + +- Support [MaskFormer](configs/maskformer), [DyHead](configs/dyhead), [OpenImages Dataset](configs/openimages) and [TIMM backbone](configs/timm_example) +- Support visualization for Panoptic Segmentation +- Release a good recipe of using ResNet in object detectors pre-trained by [ResNet Strikes Back](https://arxiv.org/abs/2110.00476), which consistently brings about 3~4 mAP improvements over RetinaNet, Faster/Mask/Cascade Mask R-CNN + +Please refer to [changelog.md](docs/en/changelog.md) for details and release history. + +For compatibility changes between different versions of MMDetection, please refer to [compatibility.md](docs/en/compatibility.md). + +## Overview of Benchmark and Model Zoo + +Results and models are available in the [model zoo](docs/en/model_zoo.md). + +
+[stripped HTML table "Architectures": columns Object Detection, Instance Segmentation, Panoptic Segmentation, Other (incl. Contrastive Learning, Distillation)]
+[stripped HTML table "Components": columns Backbones, Necks, Loss, Common]
    + +Some other methods are also supported in [projects using MMDetection](./docs/en/projects.md). + +## Installation + +Please refer to [get_started.md](docs/en/get_started.md) for installation. + +## Getting Started + +Please see [get_started.md](docs/en/get_started.md) for the basic usage of MMDetection. +We provide [colab tutorial](demo/MMDet_Tutorial.ipynb), and full guidance for quick run [with existing dataset](docs/en/1_exist_data_model.md) and [with new dataset](docs/en/2_new_data_model.md) for beginners. +There are also tutorials for [finetuning models](docs/en/tutorials/finetune.md), [adding new dataset](docs/en/tutorials/customize_dataset.md), [designing data pipeline](docs/en/tutorials/data_pipeline.md), [customizing models](docs/en/tutorials/customize_models.md), [customizing runtime settings](docs/en/tutorials/customize_runtime.md) and [useful tools](docs/en/useful_tools.md). + +Please refer to [FAQ](docs/en/faq.md) for frequently asked questions. + +## Contributing + +We appreciate all contributions to improve MMDetection. Ongoing projects can be found in out [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects). Welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline. + +## Acknowledgement + +MMDetection is an open source project that is contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedbacks. +We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors. + +## Citation + +If you use this toolbox or benchmark in your research, please cite this project. + +``` +@article{mmdetection, + title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark}, + author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and + Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and + Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and + Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and + Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong + and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua}, + journal= {arXiv preprint arXiv:1906.07155}, + year={2019} +} +``` + +## Projects in OpenMMLab + +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. 
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. diff --git a/det-mmdetection-tmi/README_ymir.md b/det-mmdetection-tmi/README_ymir.md deleted file mode 100644 index 1281e7f..0000000 --- a/det-mmdetection-tmi/README_ymir.md +++ /dev/null @@ -1,25 +0,0 @@ -# det-mmdetection-tmi - -`mmdetection` framework for object `det`ection `t`raining/`m`ining/`i`nfer task - -# build docker image - -``` -docker build -t ymir-executor/mmdet:cuda102-tmi -build-arg SERVER_MODE=dev -f docker/Dockerfile.cuda102 . - -docker build -t ymir-executor/mmdet:cuda111-tmi -build-arg SERVER_MODE=dev -f docker/Dockerfile.cuda111 . -``` - -# changelog -- modify `mmdet/datasets/coco.py`, save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` with json format -- modify `mmdet/core/evaluation/eval_hooks.py`, write training result file and monitor task process -- modify `mmdet/datasets/__init__.py, mmdet/datasets/coco.py` and add `mmdet/datasets/ymir.py`, add class `YmirDataset` to load YMIR dataset. -- modify `requirements/runtime.txt` to add new dependent package. -- add `mmdet/utils/util_ymir.py` for ymir training/infer/mining -- add `ymir_infer.py` for infer -- add `ymir_mining.py` for mining -- add `ymir_train.py` modify `tools/train.py` to update the mmcv config for training -- add `start.py`, the entrypoint for docker image -- add `training-template.yaml, infer-template.yaml, mining-template.yaml` for ymir pre-defined hyper-parameters. -- add `docker/Dockerfile.cuda102, docker/Dockerfile.cuda111` to build docker image -- remove `docker/Dockerfile` to avoid misuse diff --git a/det-yolov5-tmi/README.md b/det-yolov5-tmi/README.md index fba577d..520d78c 100644 --- a/det-yolov5-tmi/README.md +++ b/det-yolov5-tmi/README.md @@ -1,6 +1,12 @@ # yolov5-ymir readme - [yolov5 readme](./README_yolov5.md) +``` +docker build -t ymir/ymir-executor:ymir1.1.0-cuda102-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda102.dockerfile . + +docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda111.dockerfile . 
+``` + ## change log - add `start.py` and `utils/ymir_yolov5.py` for train/infer/mining From b13264e64caec1a098ae804f844e1445709925d6 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 2 Aug 2022 17:55:11 +0800 Subject: [PATCH 090/204] update readme --- README.MD | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/README.MD b/README.MD index 3d7f8e4..5b8bc04 100644 --- a/README.MD +++ b/README.MD @@ -2,22 +2,31 @@ - [ymir](https://github.com/IndustryEssentials/ymir) -## ymir-1.1.0 official image +## ymir-1.1.0 official image -- yolov4 +- yolov4 -- yolov5 +- yolov5 -- mmdetection +- mmdetection - [detectron2](https://github.com/yzbx/ymir-detectron2) - + + - [change log](https://github.com/yzbx/ymir-detectron2/blob/master/README.md) + - ymir1.0.0的镜像与ymir1.1.0兼容 ``` docker pull youdaoyzbx/ymir-executor:ymir1.0.0-detectron2-tmi ``` +- [yolov7](https://github.com/yzbx/ymir-yolov7) + + - [change log](https://github.com/yzbx/ymir-yolov7/blob/main/ymir/README.md) + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi + ``` + ## det-yolov4-training - yolov4的训练镜像,采用mxnet与darknet框架,默认的 `Dockerfile` cuda版本为`10.1`,无法在高版本显卡如GTX3080/GTX3090上运行,需要修改dockerfile将cuda版本提升为11.1以上,参考 `cuda112.dockerfile` 进行构建。 @@ -114,7 +123,7 @@ docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi -f docker/Doc ``` cd ymir-executor/det-yolov5-tmi - docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile --build-arg SERVER_MODE=dev + docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile --build-arg SERVER_MODE=dev ``` ## 镜像运行完`/in`与`/out`目录中的文件被清理 From aa6e44503d7914bda35dea5aef13ebe81dea079c Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 2 Aug 2022 19:27:05 +0800 Subject: [PATCH 091/204] merge yolov4 training and mining --- README.MD | 18 ++++++++++--- det-yolov4-mining/Dockerfile | 20 --------------- det-yolov4-mining/cuda112.dockerfile | 15 ----------- .../.circleci/config.yml | 0 .../.travis.yml | 0 .../3rdparty/pthreads/bin/pthreadGC2.dll | Bin .../3rdparty/pthreads/bin/pthreadVC2.dll | Bin .../3rdparty/pthreads/include/pthread.h | 0 .../3rdparty/pthreads/include/sched.h | 0 .../3rdparty/pthreads/include/semaphore.h | 0 .../3rdparty/pthreads/lib/libpthreadGC2.a | Bin .../3rdparty/pthreads/lib/pthreadVC2.lib | Bin .../3rdparty/stb/include/stb_image.h | 0 .../3rdparty/stb/include/stb_image_write.h | 0 .../CMakeLists.txt | 0 .../DarknetConfig.cmake.in | 0 .../LICENSE | 0 .../Makefile | 0 .../README.md | 0 .../build.ps1 | 0 .../calc_map.sh | 0 .../cfg/9k.labels | 0 .../cfg/9k.names | 0 .../cfg/9k.tree | 0 .../cfg/Gaussian_yolov3_BDD.cfg | 0 .../cfg/alexnet.cfg | 0 .../cfg/cd53paspp-gamma.cfg | 0 .../cfg/cifar.cfg | 0 .../cfg/cifar.test.cfg | 0 .../cfg/coco.data | 0 .../cfg/coco.names | 0 .../cfg/coco9k.map | 0 .../cfg/combine9k.data | 0 .../cfg/crnn.train.cfg | 0 .../cfg/csdarknet53-omega.cfg | 0 .../cfg/cspx-p7-mish-omega.cfg | 0 .../cfg/cspx-p7-mish.cfg | 0 .../cfg/cspx-p7-mish_hp.cfg | 0 ...csresnext50-panet-spp-original-optimal.cfg | 0 .../cfg/csresnext50-panet-spp.cfg | 0 .../cfg/darknet.cfg | 0 .../cfg/darknet19.cfg | 0 .../cfg/darknet19_448.cfg | 0 .../cfg/darknet53.cfg | 0 .../cfg/darknet53_448_xnor.cfg | 0 .../cfg/densenet201.cfg | 0 .../cfg/efficientnet-lite3.cfg | 0 .../cfg/efficientnet_b0.cfg | 0 .../cfg/enet-coco.cfg | 0 .../cfg/extraction.cfg | 0 .../cfg/extraction.conv.cfg | 0 .../cfg/extraction22k.cfg | 0 .../cfg/go.test.cfg | 0 .../cfg/gru.cfg | 0 .../cfg/imagenet.labels.list | 0 
.../cfg/imagenet.shortnames.list | 0 .../cfg/imagenet1k.data | 0 .../cfg/imagenet22k.dataset | 0 .../cfg/imagenet9k.hierarchy.dataset | 0 .../cfg/inet9k.map | 0 .../cfg/jnet-conv.cfg | 0 .../cfg/lstm.train.cfg | 0 .../cfg/openimages.data | 0 .../cfg/resnet101.cfg | 0 .../cfg/resnet152.cfg | 0 .../cfg/resnet152_trident.cfg | 0 .../cfg/resnet50.cfg | 0 .../cfg/resnext152-32x4d.cfg | 0 .../cfg/rnn.cfg | 0 .../cfg/rnn.train.cfg | 0 .../cfg/strided.cfg | 0 .../cfg/t1.test.cfg | 0 .../cfg/tiny-yolo-voc.cfg | 0 .../cfg/tiny-yolo.cfg | 0 .../cfg/tiny-yolo_xnor.cfg | 0 .../cfg/tiny.cfg | 0 .../cfg/vgg-16.cfg | 0 .../cfg/vgg-conv.cfg | 0 .../cfg/voc.data | 0 .../cfg/writing.cfg | 0 .../cfg/yolo-voc.2.0.cfg | 0 .../cfg/yolo-voc.cfg | 0 .../cfg/yolo.2.0.cfg | 0 .../cfg/yolo.cfg | 0 .../cfg/yolo9000.cfg | 0 .../cfg/yolov1/tiny-coco.cfg | 0 .../cfg/yolov1/tiny-yolo.cfg | 0 .../cfg/yolov1/xyolo.test.cfg | 0 .../cfg/yolov1/yolo-coco.cfg | 0 .../cfg/yolov1/yolo-small.cfg | 0 .../cfg/yolov1/yolo.cfg | 0 .../cfg/yolov1/yolo.train.cfg | 0 .../cfg/yolov1/yolo2.cfg | 0 .../cfg/yolov2-tiny-voc.cfg | 0 .../cfg/yolov2-tiny.cfg | 0 .../cfg/yolov2-voc.cfg | 0 .../cfg/yolov2.cfg | 0 .../cfg/yolov3-openimages.cfg | 0 .../cfg/yolov3-spp.cfg | 0 .../cfg/yolov3-tiny-prn.cfg | 0 .../cfg/yolov3-tiny.cfg | 0 .../cfg/yolov3-tiny_3l.cfg | 0 .../cfg/yolov3-tiny_obj.cfg | 0 .../cfg/yolov3-tiny_occlusion_track.cfg | 0 .../cfg/yolov3-tiny_xnor.cfg | 0 .../cfg/yolov3-voc.cfg | 0 .../cfg/yolov3-voc.yolov3-giou-40.cfg | 0 .../cfg/yolov3.cfg | 0 .../cfg/yolov3.coco-giou-12.cfg | 0 .../cfg/yolov3_5l.cfg | 0 .../cfg/yolov4-csp-swish.cfg | 0 .../cfg/yolov4-csp-x-swish-frozen.cfg | 0 .../cfg/yolov4-csp-x-swish.cfg | 0 .../cfg/yolov4-csp.cfg | 0 .../cfg/yolov4-custom.cfg | 0 .../cfg/yolov4-p5-frozen.cfg | 0 .../cfg/yolov4-p5.cfg | 0 .../cfg/yolov4-p6.cfg | 0 .../cfg/yolov4-sam-mish-csp-reorg-bfm.cfg | 0 .../cfg/yolov4-tiny-3l.cfg | 0 .../cfg/yolov4-tiny-custom.cfg | 0 .../cfg/yolov4-tiny.cfg | 0 .../cfg/yolov4-tiny_contrastive.cfg | 0 .../cfg/yolov4.cfg | 0 .../cfg/yolov4_iter1000.cfg | 0 .../cfg/yolov4x-mish.cfg | 0 .../cmake/Modules/FindCUDNN.cmake | 0 .../cmake/Modules/FindPThreads4W.cmake | 0 .../cmake/Modules/FindStb.cmake | 0 .../config_and_train.py | 0 .../convert_label_ark2txt.py | 3 ++- .../convert_model_darknet2mxnet_yolov4.py | 0 .../counters_per_class.txt | 0 .../cuda101.dockerfile | 10 +++++--- .../cuda112.dockerfile | 10 +++++--- .../darknet.py | 0 .../darknet_images.py | 0 .../darknet_video.py | 0 .../data/9k.tree | 0 .../data/coco.names | 0 .../data/coco9k.map | 0 .../data/goal.txt | 0 .../data/imagenet.labels.list | 0 .../data/imagenet.shortnames.list | 0 .../data/labels/make_labels.py | 0 .../data/openimages.names | 0 .../data/voc.names | 0 .../image_yolov3.sh | 0 .../image_yolov4.sh | 0 .../img.txt | 0 .../include/darknet.h | 0 .../include/yolo_v2_class.hpp | 0 .../json_mjpeg_streams.sh | 0 .../make_train_test_darknet.sh | 0 .../mining}/.dockerignore | 0 .../mining}/README.md | 0 .../mining}/active_learning/__init__.py | 0 .../mining}/active_learning/apis/__init__.py | 0 .../mining}/active_learning/apis/al_api.py | 0 .../active_learning/apis/docker_api.py | 0 .../active_learning/dataset/__init__.py | 0 .../active_learning/dataset/datareader.py | 0 .../dataset/labeled_dataset.py | 0 .../dataset/unlabeled_dataset.py | 0 .../model_inference/__init__.py | 0 .../model_inference/centernet.py | 0 .../model_inference/yolo_models.py | 0 .../active_learning/strategy/__init__.py | 0 .../mining}/active_learning/strategy/aldd.py | 0 
.../active_learning/strategy/aldd_yolo.py | 0 .../mining}/active_learning/strategy/cald.py | 0 .../active_learning/strategy/data_augment.py | 0 .../strategy/random_strategy.py | 0 .../mining}/active_learning/utils/__init__.py | 0 .../mining}/active_learning/utils/al_log.py | 0 .../mining}/active_learning/utils/operator.py | 0 .../mining}/al_main.py | 0 .../mining}/combined_class.txt | 0 .../mining}/docker_main.py | 15 ++++++----- .../mining}/docker_readme.md | 0 .../mining}/infer-template.yaml | 0 .../mining}/mining-template.yaml | 4 +-- .../mining}/monitor_process.py | 0 .../mining}/start.sh | 0 .../mining}/test_api.py | 0 .../mining}/test_centernet.py | 0 .../mining}/tools/al_strategsy_union.py | 0 .../mining}/tools/imagenet_hard_negative.py | 0 .../mining}/tools/plot_dataset_class_hist.py | 0 .../mining}/tools/visualize_aldd.py | 0 .../mining}/tools/visualize_cald.py | 0 .../mining}/write_result.py | 0 .../net_cam_v3.sh | 0 .../net_cam_v4.sh | 0 .../src/.editorconfig | 0 .../src/activation_kernels.cu | 0 .../src/activation_layer.c | 0 .../src/activation_layer.h | 0 .../src/activations.c | 0 .../src/activations.h | 0 .../src/art.c | 0 .../src/avgpool_layer.c | 0 .../src/avgpool_layer.h | 0 .../src/avgpool_layer_kernels.cu | 0 .../src/batchnorm_layer.c | 0 .../src/batchnorm_layer.h | 0 .../src/blas.c | 0 .../src/blas.h | 0 .../src/blas_kernels.cu | 0 .../src/box.c | 0 .../src/box.h | 0 .../src/captcha.c | 0 .../src/cifar.c | 0 .../src/classifier.c | 0 .../src/classifier.h | 0 .../src/coco.c | 0 .../src/col2im.c | 0 .../src/col2im.h | 0 .../src/col2im_kernels.cu | 0 .../src/compare.c | 0 .../src/connected_layer.c | 0 .../src/connected_layer.h | 0 .../src/conv_lstm_layer.c | 0 .../src/conv_lstm_layer.h | 0 .../src/convolutional_kernels.cu | 0 .../src/convolutional_layer.c | 0 .../src/convolutional_layer.h | 0 .../src/cost_layer.c | 0 .../src/cost_layer.h | 0 .../src/cpu_gemm.c | 0 .../src/crnn_layer.c | 0 .../src/crnn_layer.h | 0 .../src/crop_layer.c | 0 .../src/crop_layer.h | 0 .../src/crop_layer_kernels.cu | 0 .../src/csharp/CMakeLists.txt | 0 .../src/csharp/YoloCSharpWrapper.cs | 0 .../src/dark_cuda.c | 0 .../src/dark_cuda.h | 0 .../src/darknet.c | 0 .../src/darkunistd.h | 0 .../src/data.c | 0 .../src/data.h | 0 .../src/deconvolutional_kernels.cu | 0 .../src/deconvolutional_layer.c | 0 .../src/deconvolutional_layer.h | 0 .../src/demo.c | 0 .../src/demo.h | 0 .../src/detection_layer.c | 0 .../src/detection_layer.h | 0 .../src/detector.c | 0 .../src/dice.c | 0 .../src/dropout_layer.c | 0 .../src/dropout_layer.h | 0 .../src/dropout_layer_kernels.cu | 0 .../src/gaussian_yolo_layer.c | 0 .../src/gaussian_yolo_layer.h | 0 .../src/gemm.c | 0 .../src/gemm.h | 0 .../src/getopt.c | 0 .../src/getopt.h | 0 .../src/gettimeofday.c | 0 .../src/gettimeofday.h | 0 .../src/go.c | 0 .../src/gru_layer.c | 0 .../src/gru_layer.h | 0 .../src/http_stream.cpp | 0 .../src/http_stream.h | 0 .../src/httplib.h | 0 .../src/im2col.c | 0 .../src/im2col.h | 0 .../src/im2col_kernels.cu | 0 .../src/image.c | 0 .../src/image.h | 0 .../src/image_opencv.cpp | 0 .../src/image_opencv.h | 0 .../src/layer.c | 0 .../src/layer.h | 0 .../src/list.c | 0 .../src/list.h | 0 .../src/local_layer.c | 0 .../src/local_layer.h | 0 .../src/lstm_layer.c | 0 .../src/lstm_layer.h | 0 .../src/matrix.c | 0 .../src/matrix.h | 0 .../src/maxpool_layer.c | 0 .../src/maxpool_layer.h | 0 .../src/maxpool_layer_kernels.cu | 0 .../src/network.c | 0 .../src/network.h | 0 .../src/network_kernels.cu | 0 .../src/nightmare.c | 0 .../src/normalization_layer.c | 
0 .../src/normalization_layer.h | 0 .../src/option_list.c | 0 .../src/option_list.h | 0 .../src/parser.c | 0 .../src/parser.h | 0 .../src/region_layer.c | 0 .../src/region_layer.h | 0 .../src/reorg_layer.c | 0 .../src/reorg_layer.h | 0 .../src/reorg_old_layer.c | 0 .../src/reorg_old_layer.h | 0 .../src/representation_layer.c | 0 .../src/representation_layer.h | 0 .../src/rnn.c | 0 .../src/rnn_layer.c | 0 .../src/rnn_layer.h | 0 .../src/rnn_vid.c | 0 .../src/route_layer.c | 0 .../src/route_layer.h | 0 .../src/sam_layer.c | 0 .../src/sam_layer.h | 0 .../src/scale_channels_layer.c | 0 .../src/scale_channels_layer.h | 0 .../src/shortcut_layer.c | 0 .../src/shortcut_layer.h | 0 .../src/softmax_layer.c | 0 .../src/softmax_layer.h | 0 .../src/super.c | 0 .../src/swag.c | 0 .../src/tag.c | 0 .../src/tree.c | 0 .../src/tree.h | 0 .../src/upsample_layer.c | 0 .../src/upsample_layer.h | 0 .../src/utils.c | 0 .../src/utils.h | 0 .../src/version.h | 0 .../src/version.h.in | 0 .../src/voxel.c | 0 .../src/writing.c | 0 .../src/yolo.c | 0 .../src/yolo_console_dll.cpp | 0 .../src/yolo_layer.c | 0 .../src/yolo_layer.h | 0 .../src/yolo_v2_class.cpp | 0 det-yolov4-tmi/start.py | 24 ++++++++++++++++++ .../train.sh | 0 .../train_watcher.py | 0 .../train_yolov3.sh | 0 .../training-template.yaml | 2 +- .../video_yolov3.sh | 0 .../video_yolov4.sh | 0 .../warm_up_training.py | 0 347 files changed, 66 insertions(+), 55 deletions(-) delete mode 100644 det-yolov4-mining/Dockerfile delete mode 100644 det-yolov4-mining/cuda112.dockerfile rename {det-yolov4-training => det-yolov4-tmi}/.circleci/config.yml (100%) rename {det-yolov4-training => det-yolov4-tmi}/.travis.yml (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/bin/pthreadGC2.dll (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/bin/pthreadVC2.dll (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/include/pthread.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/include/sched.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/include/semaphore.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/lib/libpthreadGC2.a (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/pthreads/lib/pthreadVC2.lib (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/stb/include/stb_image.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/3rdparty/stb/include/stb_image_write.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/CMakeLists.txt (100%) rename {det-yolov4-training => det-yolov4-tmi}/DarknetConfig.cmake.in (100%) rename {det-yolov4-training => det-yolov4-tmi}/LICENSE (100%) rename {det-yolov4-training => det-yolov4-tmi}/Makefile (100%) rename {det-yolov4-training => det-yolov4-tmi}/README.md (100%) rename {det-yolov4-training => det-yolov4-tmi}/build.ps1 (100%) rename {det-yolov4-training => det-yolov4-tmi}/calc_map.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/9k.labels (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/9k.names (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/9k.tree (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/Gaussian_yolov3_BDD.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/alexnet.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/cd53paspp-gamma.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/cifar.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/cifar.test.cfg (100%) rename {det-yolov4-training 
=> det-yolov4-tmi}/cfg/coco.data (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/coco.names (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/coco9k.map (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/combine9k.data (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/crnn.train.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/csdarknet53-omega.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/cspx-p7-mish-omega.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/cspx-p7-mish.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/cspx-p7-mish_hp.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/csresnext50-panet-spp-original-optimal.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/csresnext50-panet-spp.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/darknet.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/darknet19.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/darknet19_448.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/darknet53.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/darknet53_448_xnor.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/densenet201.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/efficientnet-lite3.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/efficientnet_b0.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/enet-coco.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/extraction.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/extraction.conv.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/extraction22k.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/go.test.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/gru.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/imagenet.labels.list (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/imagenet.shortnames.list (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/imagenet1k.data (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/imagenet22k.dataset (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/imagenet9k.hierarchy.dataset (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/inet9k.map (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/jnet-conv.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/lstm.train.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/openimages.data (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/resnet101.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/resnet152.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/resnet152_trident.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/resnet50.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/resnext152-32x4d.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/rnn.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/rnn.train.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/strided.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/t1.test.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/tiny-yolo-voc.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/tiny-yolo.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/tiny-yolo_xnor.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/tiny.cfg (100%) rename {det-yolov4-training => 
det-yolov4-tmi}/cfg/vgg-16.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/vgg-conv.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/voc.data (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/writing.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolo-voc.2.0.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolo-voc.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolo.2.0.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolo.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolo9000.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/tiny-coco.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/tiny-yolo.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/xyolo.test.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/yolo-coco.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/yolo-small.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/yolo.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/yolo.train.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov1/yolo2.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov2-tiny-voc.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov2-tiny.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov2-voc.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov2.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-openimages.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-spp.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-tiny-prn.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-tiny.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-tiny_3l.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-tiny_obj.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-tiny_occlusion_track.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-tiny_xnor.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-voc.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3-voc.yolov3-giou-40.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3.coco-giou-12.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov3_5l.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-csp-swish.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-csp-x-swish-frozen.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-csp-x-swish.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-csp.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-custom.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-p5-frozen.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-p5.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-p6.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-sam-mish-csp-reorg-bfm.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-tiny-3l.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-tiny-custom.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4-tiny.cfg (100%) rename {det-yolov4-training => 
det-yolov4-tmi}/cfg/yolov4-tiny_contrastive.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4_iter1000.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cfg/yolov4x-mish.cfg (100%) rename {det-yolov4-training => det-yolov4-tmi}/cmake/Modules/FindCUDNN.cmake (100%) rename {det-yolov4-training => det-yolov4-tmi}/cmake/Modules/FindPThreads4W.cmake (100%) rename {det-yolov4-training => det-yolov4-tmi}/cmake/Modules/FindStb.cmake (100%) rename {det-yolov4-training => det-yolov4-tmi}/config_and_train.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/convert_label_ark2txt.py (97%) rename {det-yolov4-training => det-yolov4-tmi}/convert_model_darknet2mxnet_yolov4.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/counters_per_class.txt (100%) rename det-yolov4-training/Dockerfile => det-yolov4-tmi/cuda101.dockerfile (82%) rename {det-yolov4-training => det-yolov4-tmi}/cuda112.dockerfile (82%) rename {det-yolov4-training => det-yolov4-tmi}/darknet.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/darknet_images.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/darknet_video.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/9k.tree (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/coco.names (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/coco9k.map (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/goal.txt (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/imagenet.labels.list (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/imagenet.shortnames.list (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/labels/make_labels.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/openimages.names (100%) rename {det-yolov4-training => det-yolov4-tmi}/data/voc.names (100%) rename {det-yolov4-training => det-yolov4-tmi}/image_yolov3.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/image_yolov4.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/img.txt (100%) rename {det-yolov4-training => det-yolov4-tmi}/include/darknet.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/include/yolo_v2_class.hpp (100%) rename {det-yolov4-training => det-yolov4-tmi}/json_mjpeg_streams.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/make_train_test_darknet.sh (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/.dockerignore (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/README.md (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/__init__.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/apis/__init__.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/apis/al_api.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/apis/docker_api.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/dataset/__init__.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/dataset/datareader.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/dataset/labeled_dataset.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/dataset/unlabeled_dataset.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/model_inference/__init__.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/model_inference/centernet.py (100%) rename {det-yolov4-mining => 
det-yolov4-tmi/mining}/active_learning/model_inference/yolo_models.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/strategy/__init__.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/strategy/aldd.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/strategy/aldd_yolo.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/strategy/cald.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/strategy/data_augment.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/strategy/random_strategy.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/utils/__init__.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/utils/al_log.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/active_learning/utils/operator.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/al_main.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/combined_class.txt (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/docker_main.py (88%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/docker_readme.md (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/infer-template.yaml (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/mining-template.yaml (95%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/monitor_process.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/start.sh (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/test_api.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/test_centernet.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/tools/al_strategsy_union.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/tools/imagenet_hard_negative.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/tools/plot_dataset_class_hist.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/tools/visualize_aldd.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/tools/visualize_cald.py (100%) rename {det-yolov4-mining => det-yolov4-tmi/mining}/write_result.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/net_cam_v3.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/net_cam_v4.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/.editorconfig (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/activation_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/activation_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/activation_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/activations.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/activations.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/art.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/avgpool_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/avgpool_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/avgpool_layer_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/batchnorm_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/batchnorm_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/blas.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/blas.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/blas_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/box.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/box.h 
(100%) rename {det-yolov4-training => det-yolov4-tmi}/src/captcha.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/cifar.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/classifier.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/classifier.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/coco.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/col2im.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/col2im.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/col2im_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/compare.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/connected_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/connected_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/conv_lstm_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/conv_lstm_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/convolutional_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/convolutional_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/convolutional_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/cost_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/cost_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/cpu_gemm.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/crnn_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/crnn_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/crop_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/crop_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/crop_layer_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/csharp/CMakeLists.txt (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/csharp/YoloCSharpWrapper.cs (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/dark_cuda.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/dark_cuda.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/darknet.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/darkunistd.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/data.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/data.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/deconvolutional_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/deconvolutional_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/deconvolutional_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/demo.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/demo.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/detection_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/detection_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/detector.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/dice.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/dropout_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/dropout_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/dropout_layer_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gaussian_yolo_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gaussian_yolo_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gemm.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gemm.h (100%) rename {det-yolov4-training => 
det-yolov4-tmi}/src/getopt.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/getopt.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gettimeofday.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gettimeofday.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/go.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gru_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/gru_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/http_stream.cpp (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/http_stream.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/httplib.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/im2col.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/im2col.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/im2col_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/image.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/image.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/image_opencv.cpp (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/image_opencv.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/list.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/list.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/local_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/local_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/lstm_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/lstm_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/matrix.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/matrix.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/maxpool_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/maxpool_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/maxpool_layer_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/network.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/network.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/network_kernels.cu (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/nightmare.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/normalization_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/normalization_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/option_list.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/option_list.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/parser.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/parser.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/region_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/region_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/reorg_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/reorg_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/reorg_old_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/reorg_old_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/representation_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/representation_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/rnn.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/rnn_layer.c (100%) rename 
{det-yolov4-training => det-yolov4-tmi}/src/rnn_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/rnn_vid.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/route_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/route_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/sam_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/sam_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/scale_channels_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/scale_channels_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/shortcut_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/shortcut_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/softmax_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/softmax_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/super.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/swag.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/tag.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/tree.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/tree.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/upsample_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/upsample_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/utils.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/utils.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/version.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/version.h.in (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/voxel.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/writing.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/yolo.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/yolo_console_dll.cpp (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/yolo_layer.c (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/yolo_layer.h (100%) rename {det-yolov4-training => det-yolov4-tmi}/src/yolo_v2_class.cpp (100%) create mode 100644 det-yolov4-tmi/start.py rename {det-yolov4-training => det-yolov4-tmi}/train.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/train_watcher.py (100%) rename {det-yolov4-training => det-yolov4-tmi}/train_yolov3.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/training-template.yaml (96%) rename {det-yolov4-training => det-yolov4-tmi}/video_yolov3.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/video_yolov4.sh (100%) rename {det-yolov4-training => det-yolov4-tmi}/warm_up_training.py (100%)
diff --git a/README.MD b/README.MD
index 5b8bc04..b03b375 100644
--- a/README.MD
+++ b/README.MD
@@ -4,11 +4,22 @@
 
 ## ymir-1.1.0 official image
 
-- yolov4
+- [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training)
 
-- yolov5
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu111-tmi
+  ```
+
+- [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi)
 
-- mmdetection
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi
+  ```
+
+- [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi)
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi
+  ```
 
 - [detectron2](https://github.com/yzbx/ymir-detectron2)
@@ -23,6 +34,7 @@
 
 - [yolov7](https://github.com/yzbx/ymir-yolov7)
 - [change log](https://github.com/yzbx/ymir-yolov7/blob/main/ymir/README.md)
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi
+  ```
diff --git a/det-yolov4-mining/Dockerfile b/det-yolov4-mining/Dockerfile
deleted file mode 100644
index 4305760..0000000
--- a/det-yolov4-mining/Dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-FROM industryessentials/mxnet_python:1.5.0_gpu_cu101mkl_py3_ub18
-RUN sed -i '/developer\.download\.nvidia\.com\/compute\/cuda\/repos/d' /etc/apt/sources.list.d/* \
-    && sed -i '/developer\.download\.nvidia\.com\/compute\/machine-learning\/repos/d' /etc/apt/sources.list.d/* \
-    && apt-key del 7fa2af80 \
-    && wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb \
-    && dpkg -i cuda-keyring_1.0-1_all.deb
-RUN apt-get update && apt-get install -y --no-install-recommends libsm6 libxext6 libfontconfig1 libxrender1 libgl1-mesa-glx \
-    && apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN pip3 install --upgrade pip setuptools wheel && pip3 install opencv-python pyyaml scipy tqdm && rm -rf /root/.cache/pip3
-
-COPY . /app
-WORKDIR /app
-RUN cp ./start.sh /usr/bin/start.sh && \
-    mkdir -p /img-man && \
-    cp ./mining-template.yaml /img-man/mining-template.yaml && \
-    cp ./infer-template.yaml /img-man/infer-template.yaml && \
-    cp ./README.md /img-man/readme.md
-CMD sh /usr/bin/start.sh
diff --git a/det-yolov4-mining/cuda112.dockerfile b/det-yolov4-mining/cuda112.dockerfile
deleted file mode 100644
index 871b00f..0000000
--- a/det-yolov4-mining/cuda112.dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM industryessentials/ymir-executor:cuda112-yolov4-training
-
-RUN apt-get update && apt-get install -y --no-install-recommends libsm6 libxext6 libfontconfig1 libxrender1 libgl1-mesa-glx \
-    && apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN pip3 install --upgrade pip setuptools wheel && pip3 install opencv-python pyyaml scipy tqdm && rm -rf /root/.cache/pip3
-
-COPY .
/app -WORKDIR /app -RUN cp ./start.sh /usr/bin/start.sh && \ - mkdir -p /img-man && \ - cp ./mining-template.yaml /img-man/mining-template.yaml && \ - cp ./infer-template.yaml /img-man/infer-template.yaml && \ - cp ./README.md /img-man/readme.md -CMD sh /usr/bin/start.sh diff --git a/det-yolov4-training/.circleci/config.yml b/det-yolov4-tmi/.circleci/config.yml similarity index 100% rename from det-yolov4-training/.circleci/config.yml rename to det-yolov4-tmi/.circleci/config.yml diff --git a/det-yolov4-training/.travis.yml b/det-yolov4-tmi/.travis.yml similarity index 100% rename from det-yolov4-training/.travis.yml rename to det-yolov4-tmi/.travis.yml diff --git a/det-yolov4-training/3rdparty/pthreads/bin/pthreadGC2.dll b/det-yolov4-tmi/3rdparty/pthreads/bin/pthreadGC2.dll similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/bin/pthreadGC2.dll rename to det-yolov4-tmi/3rdparty/pthreads/bin/pthreadGC2.dll diff --git a/det-yolov4-training/3rdparty/pthreads/bin/pthreadVC2.dll b/det-yolov4-tmi/3rdparty/pthreads/bin/pthreadVC2.dll similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/bin/pthreadVC2.dll rename to det-yolov4-tmi/3rdparty/pthreads/bin/pthreadVC2.dll diff --git a/det-yolov4-training/3rdparty/pthreads/include/pthread.h b/det-yolov4-tmi/3rdparty/pthreads/include/pthread.h similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/include/pthread.h rename to det-yolov4-tmi/3rdparty/pthreads/include/pthread.h diff --git a/det-yolov4-training/3rdparty/pthreads/include/sched.h b/det-yolov4-tmi/3rdparty/pthreads/include/sched.h similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/include/sched.h rename to det-yolov4-tmi/3rdparty/pthreads/include/sched.h diff --git a/det-yolov4-training/3rdparty/pthreads/include/semaphore.h b/det-yolov4-tmi/3rdparty/pthreads/include/semaphore.h similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/include/semaphore.h rename to det-yolov4-tmi/3rdparty/pthreads/include/semaphore.h diff --git a/det-yolov4-training/3rdparty/pthreads/lib/libpthreadGC2.a b/det-yolov4-tmi/3rdparty/pthreads/lib/libpthreadGC2.a similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/lib/libpthreadGC2.a rename to det-yolov4-tmi/3rdparty/pthreads/lib/libpthreadGC2.a diff --git a/det-yolov4-training/3rdparty/pthreads/lib/pthreadVC2.lib b/det-yolov4-tmi/3rdparty/pthreads/lib/pthreadVC2.lib similarity index 100% rename from det-yolov4-training/3rdparty/pthreads/lib/pthreadVC2.lib rename to det-yolov4-tmi/3rdparty/pthreads/lib/pthreadVC2.lib diff --git a/det-yolov4-training/3rdparty/stb/include/stb_image.h b/det-yolov4-tmi/3rdparty/stb/include/stb_image.h similarity index 100% rename from det-yolov4-training/3rdparty/stb/include/stb_image.h rename to det-yolov4-tmi/3rdparty/stb/include/stb_image.h diff --git a/det-yolov4-training/3rdparty/stb/include/stb_image_write.h b/det-yolov4-tmi/3rdparty/stb/include/stb_image_write.h similarity index 100% rename from det-yolov4-training/3rdparty/stb/include/stb_image_write.h rename to det-yolov4-tmi/3rdparty/stb/include/stb_image_write.h diff --git a/det-yolov4-training/CMakeLists.txt b/det-yolov4-tmi/CMakeLists.txt similarity index 100% rename from det-yolov4-training/CMakeLists.txt rename to det-yolov4-tmi/CMakeLists.txt diff --git a/det-yolov4-training/DarknetConfig.cmake.in b/det-yolov4-tmi/DarknetConfig.cmake.in similarity index 100% rename from det-yolov4-training/DarknetConfig.cmake.in rename to det-yolov4-tmi/DarknetConfig.cmake.in diff 
--git a/det-yolov4-training/LICENSE b/det-yolov4-tmi/LICENSE similarity index 100% rename from det-yolov4-training/LICENSE rename to det-yolov4-tmi/LICENSE diff --git a/det-yolov4-training/Makefile b/det-yolov4-tmi/Makefile similarity index 100% rename from det-yolov4-training/Makefile rename to det-yolov4-tmi/Makefile diff --git a/det-yolov4-training/README.md b/det-yolov4-tmi/README.md similarity index 100% rename from det-yolov4-training/README.md rename to det-yolov4-tmi/README.md diff --git a/det-yolov4-training/build.ps1 b/det-yolov4-tmi/build.ps1 similarity index 100% rename from det-yolov4-training/build.ps1 rename to det-yolov4-tmi/build.ps1 diff --git a/det-yolov4-training/calc_map.sh b/det-yolov4-tmi/calc_map.sh similarity index 100% rename from det-yolov4-training/calc_map.sh rename to det-yolov4-tmi/calc_map.sh diff --git a/det-yolov4-training/cfg/9k.labels b/det-yolov4-tmi/cfg/9k.labels similarity index 100% rename from det-yolov4-training/cfg/9k.labels rename to det-yolov4-tmi/cfg/9k.labels diff --git a/det-yolov4-training/cfg/9k.names b/det-yolov4-tmi/cfg/9k.names similarity index 100% rename from det-yolov4-training/cfg/9k.names rename to det-yolov4-tmi/cfg/9k.names diff --git a/det-yolov4-training/cfg/9k.tree b/det-yolov4-tmi/cfg/9k.tree similarity index 100% rename from det-yolov4-training/cfg/9k.tree rename to det-yolov4-tmi/cfg/9k.tree diff --git a/det-yolov4-training/cfg/Gaussian_yolov3_BDD.cfg b/det-yolov4-tmi/cfg/Gaussian_yolov3_BDD.cfg similarity index 100% rename from det-yolov4-training/cfg/Gaussian_yolov3_BDD.cfg rename to det-yolov4-tmi/cfg/Gaussian_yolov3_BDD.cfg diff --git a/det-yolov4-training/cfg/alexnet.cfg b/det-yolov4-tmi/cfg/alexnet.cfg similarity index 100% rename from det-yolov4-training/cfg/alexnet.cfg rename to det-yolov4-tmi/cfg/alexnet.cfg diff --git a/det-yolov4-training/cfg/cd53paspp-gamma.cfg b/det-yolov4-tmi/cfg/cd53paspp-gamma.cfg similarity index 100% rename from det-yolov4-training/cfg/cd53paspp-gamma.cfg rename to det-yolov4-tmi/cfg/cd53paspp-gamma.cfg diff --git a/det-yolov4-training/cfg/cifar.cfg b/det-yolov4-tmi/cfg/cifar.cfg similarity index 100% rename from det-yolov4-training/cfg/cifar.cfg rename to det-yolov4-tmi/cfg/cifar.cfg diff --git a/det-yolov4-training/cfg/cifar.test.cfg b/det-yolov4-tmi/cfg/cifar.test.cfg similarity index 100% rename from det-yolov4-training/cfg/cifar.test.cfg rename to det-yolov4-tmi/cfg/cifar.test.cfg diff --git a/det-yolov4-training/cfg/coco.data b/det-yolov4-tmi/cfg/coco.data similarity index 100% rename from det-yolov4-training/cfg/coco.data rename to det-yolov4-tmi/cfg/coco.data diff --git a/det-yolov4-training/cfg/coco.names b/det-yolov4-tmi/cfg/coco.names similarity index 100% rename from det-yolov4-training/cfg/coco.names rename to det-yolov4-tmi/cfg/coco.names diff --git a/det-yolov4-training/cfg/coco9k.map b/det-yolov4-tmi/cfg/coco9k.map similarity index 100% rename from det-yolov4-training/cfg/coco9k.map rename to det-yolov4-tmi/cfg/coco9k.map diff --git a/det-yolov4-training/cfg/combine9k.data b/det-yolov4-tmi/cfg/combine9k.data similarity index 100% rename from det-yolov4-training/cfg/combine9k.data rename to det-yolov4-tmi/cfg/combine9k.data diff --git a/det-yolov4-training/cfg/crnn.train.cfg b/det-yolov4-tmi/cfg/crnn.train.cfg similarity index 100% rename from det-yolov4-training/cfg/crnn.train.cfg rename to det-yolov4-tmi/cfg/crnn.train.cfg diff --git a/det-yolov4-training/cfg/csdarknet53-omega.cfg b/det-yolov4-tmi/cfg/csdarknet53-omega.cfg similarity index 100% rename from 
det-yolov4-training/cfg/csdarknet53-omega.cfg rename to det-yolov4-tmi/cfg/csdarknet53-omega.cfg diff --git a/det-yolov4-training/cfg/cspx-p7-mish-omega.cfg b/det-yolov4-tmi/cfg/cspx-p7-mish-omega.cfg similarity index 100% rename from det-yolov4-training/cfg/cspx-p7-mish-omega.cfg rename to det-yolov4-tmi/cfg/cspx-p7-mish-omega.cfg diff --git a/det-yolov4-training/cfg/cspx-p7-mish.cfg b/det-yolov4-tmi/cfg/cspx-p7-mish.cfg similarity index 100% rename from det-yolov4-training/cfg/cspx-p7-mish.cfg rename to det-yolov4-tmi/cfg/cspx-p7-mish.cfg diff --git a/det-yolov4-training/cfg/cspx-p7-mish_hp.cfg b/det-yolov4-tmi/cfg/cspx-p7-mish_hp.cfg similarity index 100% rename from det-yolov4-training/cfg/cspx-p7-mish_hp.cfg rename to det-yolov4-tmi/cfg/cspx-p7-mish_hp.cfg diff --git a/det-yolov4-training/cfg/csresnext50-panet-spp-original-optimal.cfg b/det-yolov4-tmi/cfg/csresnext50-panet-spp-original-optimal.cfg similarity index 100% rename from det-yolov4-training/cfg/csresnext50-panet-spp-original-optimal.cfg rename to det-yolov4-tmi/cfg/csresnext50-panet-spp-original-optimal.cfg diff --git a/det-yolov4-training/cfg/csresnext50-panet-spp.cfg b/det-yolov4-tmi/cfg/csresnext50-panet-spp.cfg similarity index 100% rename from det-yolov4-training/cfg/csresnext50-panet-spp.cfg rename to det-yolov4-tmi/cfg/csresnext50-panet-spp.cfg diff --git a/det-yolov4-training/cfg/darknet.cfg b/det-yolov4-tmi/cfg/darknet.cfg similarity index 100% rename from det-yolov4-training/cfg/darknet.cfg rename to det-yolov4-tmi/cfg/darknet.cfg diff --git a/det-yolov4-training/cfg/darknet19.cfg b/det-yolov4-tmi/cfg/darknet19.cfg similarity index 100% rename from det-yolov4-training/cfg/darknet19.cfg rename to det-yolov4-tmi/cfg/darknet19.cfg diff --git a/det-yolov4-training/cfg/darknet19_448.cfg b/det-yolov4-tmi/cfg/darknet19_448.cfg similarity index 100% rename from det-yolov4-training/cfg/darknet19_448.cfg rename to det-yolov4-tmi/cfg/darknet19_448.cfg diff --git a/det-yolov4-training/cfg/darknet53.cfg b/det-yolov4-tmi/cfg/darknet53.cfg similarity index 100% rename from det-yolov4-training/cfg/darknet53.cfg rename to det-yolov4-tmi/cfg/darknet53.cfg diff --git a/det-yolov4-training/cfg/darknet53_448_xnor.cfg b/det-yolov4-tmi/cfg/darknet53_448_xnor.cfg similarity index 100% rename from det-yolov4-training/cfg/darknet53_448_xnor.cfg rename to det-yolov4-tmi/cfg/darknet53_448_xnor.cfg diff --git a/det-yolov4-training/cfg/densenet201.cfg b/det-yolov4-tmi/cfg/densenet201.cfg similarity index 100% rename from det-yolov4-training/cfg/densenet201.cfg rename to det-yolov4-tmi/cfg/densenet201.cfg diff --git a/det-yolov4-training/cfg/efficientnet-lite3.cfg b/det-yolov4-tmi/cfg/efficientnet-lite3.cfg similarity index 100% rename from det-yolov4-training/cfg/efficientnet-lite3.cfg rename to det-yolov4-tmi/cfg/efficientnet-lite3.cfg diff --git a/det-yolov4-training/cfg/efficientnet_b0.cfg b/det-yolov4-tmi/cfg/efficientnet_b0.cfg similarity index 100% rename from det-yolov4-training/cfg/efficientnet_b0.cfg rename to det-yolov4-tmi/cfg/efficientnet_b0.cfg diff --git a/det-yolov4-training/cfg/enet-coco.cfg b/det-yolov4-tmi/cfg/enet-coco.cfg similarity index 100% rename from det-yolov4-training/cfg/enet-coco.cfg rename to det-yolov4-tmi/cfg/enet-coco.cfg diff --git a/det-yolov4-training/cfg/extraction.cfg b/det-yolov4-tmi/cfg/extraction.cfg similarity index 100% rename from det-yolov4-training/cfg/extraction.cfg rename to det-yolov4-tmi/cfg/extraction.cfg diff --git a/det-yolov4-training/cfg/extraction.conv.cfg 
b/det-yolov4-tmi/cfg/extraction.conv.cfg similarity index 100% rename from det-yolov4-training/cfg/extraction.conv.cfg rename to det-yolov4-tmi/cfg/extraction.conv.cfg diff --git a/det-yolov4-training/cfg/extraction22k.cfg b/det-yolov4-tmi/cfg/extraction22k.cfg similarity index 100% rename from det-yolov4-training/cfg/extraction22k.cfg rename to det-yolov4-tmi/cfg/extraction22k.cfg diff --git a/det-yolov4-training/cfg/go.test.cfg b/det-yolov4-tmi/cfg/go.test.cfg similarity index 100% rename from det-yolov4-training/cfg/go.test.cfg rename to det-yolov4-tmi/cfg/go.test.cfg diff --git a/det-yolov4-training/cfg/gru.cfg b/det-yolov4-tmi/cfg/gru.cfg similarity index 100% rename from det-yolov4-training/cfg/gru.cfg rename to det-yolov4-tmi/cfg/gru.cfg diff --git a/det-yolov4-training/cfg/imagenet.labels.list b/det-yolov4-tmi/cfg/imagenet.labels.list similarity index 100% rename from det-yolov4-training/cfg/imagenet.labels.list rename to det-yolov4-tmi/cfg/imagenet.labels.list diff --git a/det-yolov4-training/cfg/imagenet.shortnames.list b/det-yolov4-tmi/cfg/imagenet.shortnames.list similarity index 100% rename from det-yolov4-training/cfg/imagenet.shortnames.list rename to det-yolov4-tmi/cfg/imagenet.shortnames.list diff --git a/det-yolov4-training/cfg/imagenet1k.data b/det-yolov4-tmi/cfg/imagenet1k.data similarity index 100% rename from det-yolov4-training/cfg/imagenet1k.data rename to det-yolov4-tmi/cfg/imagenet1k.data diff --git a/det-yolov4-training/cfg/imagenet22k.dataset b/det-yolov4-tmi/cfg/imagenet22k.dataset similarity index 100% rename from det-yolov4-training/cfg/imagenet22k.dataset rename to det-yolov4-tmi/cfg/imagenet22k.dataset diff --git a/det-yolov4-training/cfg/imagenet9k.hierarchy.dataset b/det-yolov4-tmi/cfg/imagenet9k.hierarchy.dataset similarity index 100% rename from det-yolov4-training/cfg/imagenet9k.hierarchy.dataset rename to det-yolov4-tmi/cfg/imagenet9k.hierarchy.dataset diff --git a/det-yolov4-training/cfg/inet9k.map b/det-yolov4-tmi/cfg/inet9k.map similarity index 100% rename from det-yolov4-training/cfg/inet9k.map rename to det-yolov4-tmi/cfg/inet9k.map diff --git a/det-yolov4-training/cfg/jnet-conv.cfg b/det-yolov4-tmi/cfg/jnet-conv.cfg similarity index 100% rename from det-yolov4-training/cfg/jnet-conv.cfg rename to det-yolov4-tmi/cfg/jnet-conv.cfg diff --git a/det-yolov4-training/cfg/lstm.train.cfg b/det-yolov4-tmi/cfg/lstm.train.cfg similarity index 100% rename from det-yolov4-training/cfg/lstm.train.cfg rename to det-yolov4-tmi/cfg/lstm.train.cfg diff --git a/det-yolov4-training/cfg/openimages.data b/det-yolov4-tmi/cfg/openimages.data similarity index 100% rename from det-yolov4-training/cfg/openimages.data rename to det-yolov4-tmi/cfg/openimages.data diff --git a/det-yolov4-training/cfg/resnet101.cfg b/det-yolov4-tmi/cfg/resnet101.cfg similarity index 100% rename from det-yolov4-training/cfg/resnet101.cfg rename to det-yolov4-tmi/cfg/resnet101.cfg diff --git a/det-yolov4-training/cfg/resnet152.cfg b/det-yolov4-tmi/cfg/resnet152.cfg similarity index 100% rename from det-yolov4-training/cfg/resnet152.cfg rename to det-yolov4-tmi/cfg/resnet152.cfg diff --git a/det-yolov4-training/cfg/resnet152_trident.cfg b/det-yolov4-tmi/cfg/resnet152_trident.cfg similarity index 100% rename from det-yolov4-training/cfg/resnet152_trident.cfg rename to det-yolov4-tmi/cfg/resnet152_trident.cfg diff --git a/det-yolov4-training/cfg/resnet50.cfg b/det-yolov4-tmi/cfg/resnet50.cfg similarity index 100% rename from det-yolov4-training/cfg/resnet50.cfg rename to 
det-yolov4-tmi/cfg/resnet50.cfg diff --git a/det-yolov4-training/cfg/resnext152-32x4d.cfg b/det-yolov4-tmi/cfg/resnext152-32x4d.cfg similarity index 100% rename from det-yolov4-training/cfg/resnext152-32x4d.cfg rename to det-yolov4-tmi/cfg/resnext152-32x4d.cfg diff --git a/det-yolov4-training/cfg/rnn.cfg b/det-yolov4-tmi/cfg/rnn.cfg similarity index 100% rename from det-yolov4-training/cfg/rnn.cfg rename to det-yolov4-tmi/cfg/rnn.cfg diff --git a/det-yolov4-training/cfg/rnn.train.cfg b/det-yolov4-tmi/cfg/rnn.train.cfg similarity index 100% rename from det-yolov4-training/cfg/rnn.train.cfg rename to det-yolov4-tmi/cfg/rnn.train.cfg diff --git a/det-yolov4-training/cfg/strided.cfg b/det-yolov4-tmi/cfg/strided.cfg similarity index 100% rename from det-yolov4-training/cfg/strided.cfg rename to det-yolov4-tmi/cfg/strided.cfg diff --git a/det-yolov4-training/cfg/t1.test.cfg b/det-yolov4-tmi/cfg/t1.test.cfg similarity index 100% rename from det-yolov4-training/cfg/t1.test.cfg rename to det-yolov4-tmi/cfg/t1.test.cfg diff --git a/det-yolov4-training/cfg/tiny-yolo-voc.cfg b/det-yolov4-tmi/cfg/tiny-yolo-voc.cfg similarity index 100% rename from det-yolov4-training/cfg/tiny-yolo-voc.cfg rename to det-yolov4-tmi/cfg/tiny-yolo-voc.cfg diff --git a/det-yolov4-training/cfg/tiny-yolo.cfg b/det-yolov4-tmi/cfg/tiny-yolo.cfg similarity index 100% rename from det-yolov4-training/cfg/tiny-yolo.cfg rename to det-yolov4-tmi/cfg/tiny-yolo.cfg diff --git a/det-yolov4-training/cfg/tiny-yolo_xnor.cfg b/det-yolov4-tmi/cfg/tiny-yolo_xnor.cfg similarity index 100% rename from det-yolov4-training/cfg/tiny-yolo_xnor.cfg rename to det-yolov4-tmi/cfg/tiny-yolo_xnor.cfg diff --git a/det-yolov4-training/cfg/tiny.cfg b/det-yolov4-tmi/cfg/tiny.cfg similarity index 100% rename from det-yolov4-training/cfg/tiny.cfg rename to det-yolov4-tmi/cfg/tiny.cfg diff --git a/det-yolov4-training/cfg/vgg-16.cfg b/det-yolov4-tmi/cfg/vgg-16.cfg similarity index 100% rename from det-yolov4-training/cfg/vgg-16.cfg rename to det-yolov4-tmi/cfg/vgg-16.cfg diff --git a/det-yolov4-training/cfg/vgg-conv.cfg b/det-yolov4-tmi/cfg/vgg-conv.cfg similarity index 100% rename from det-yolov4-training/cfg/vgg-conv.cfg rename to det-yolov4-tmi/cfg/vgg-conv.cfg diff --git a/det-yolov4-training/cfg/voc.data b/det-yolov4-tmi/cfg/voc.data similarity index 100% rename from det-yolov4-training/cfg/voc.data rename to det-yolov4-tmi/cfg/voc.data diff --git a/det-yolov4-training/cfg/writing.cfg b/det-yolov4-tmi/cfg/writing.cfg similarity index 100% rename from det-yolov4-training/cfg/writing.cfg rename to det-yolov4-tmi/cfg/writing.cfg diff --git a/det-yolov4-training/cfg/yolo-voc.2.0.cfg b/det-yolov4-tmi/cfg/yolo-voc.2.0.cfg similarity index 100% rename from det-yolov4-training/cfg/yolo-voc.2.0.cfg rename to det-yolov4-tmi/cfg/yolo-voc.2.0.cfg diff --git a/det-yolov4-training/cfg/yolo-voc.cfg b/det-yolov4-tmi/cfg/yolo-voc.cfg similarity index 100% rename from det-yolov4-training/cfg/yolo-voc.cfg rename to det-yolov4-tmi/cfg/yolo-voc.cfg diff --git a/det-yolov4-training/cfg/yolo.2.0.cfg b/det-yolov4-tmi/cfg/yolo.2.0.cfg similarity index 100% rename from det-yolov4-training/cfg/yolo.2.0.cfg rename to det-yolov4-tmi/cfg/yolo.2.0.cfg diff --git a/det-yolov4-training/cfg/yolo.cfg b/det-yolov4-tmi/cfg/yolo.cfg similarity index 100% rename from det-yolov4-training/cfg/yolo.cfg rename to det-yolov4-tmi/cfg/yolo.cfg diff --git a/det-yolov4-training/cfg/yolo9000.cfg b/det-yolov4-tmi/cfg/yolo9000.cfg similarity index 100% rename from det-yolov4-training/cfg/yolo9000.cfg rename 
to det-yolov4-tmi/cfg/yolo9000.cfg diff --git a/det-yolov4-training/cfg/yolov1/tiny-coco.cfg b/det-yolov4-tmi/cfg/yolov1/tiny-coco.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/tiny-coco.cfg rename to det-yolov4-tmi/cfg/yolov1/tiny-coco.cfg diff --git a/det-yolov4-training/cfg/yolov1/tiny-yolo.cfg b/det-yolov4-tmi/cfg/yolov1/tiny-yolo.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/tiny-yolo.cfg rename to det-yolov4-tmi/cfg/yolov1/tiny-yolo.cfg diff --git a/det-yolov4-training/cfg/yolov1/xyolo.test.cfg b/det-yolov4-tmi/cfg/yolov1/xyolo.test.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/xyolo.test.cfg rename to det-yolov4-tmi/cfg/yolov1/xyolo.test.cfg diff --git a/det-yolov4-training/cfg/yolov1/yolo-coco.cfg b/det-yolov4-tmi/cfg/yolov1/yolo-coco.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/yolo-coco.cfg rename to det-yolov4-tmi/cfg/yolov1/yolo-coco.cfg diff --git a/det-yolov4-training/cfg/yolov1/yolo-small.cfg b/det-yolov4-tmi/cfg/yolov1/yolo-small.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/yolo-small.cfg rename to det-yolov4-tmi/cfg/yolov1/yolo-small.cfg diff --git a/det-yolov4-training/cfg/yolov1/yolo.cfg b/det-yolov4-tmi/cfg/yolov1/yolo.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/yolo.cfg rename to det-yolov4-tmi/cfg/yolov1/yolo.cfg diff --git a/det-yolov4-training/cfg/yolov1/yolo.train.cfg b/det-yolov4-tmi/cfg/yolov1/yolo.train.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/yolo.train.cfg rename to det-yolov4-tmi/cfg/yolov1/yolo.train.cfg diff --git a/det-yolov4-training/cfg/yolov1/yolo2.cfg b/det-yolov4-tmi/cfg/yolov1/yolo2.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov1/yolo2.cfg rename to det-yolov4-tmi/cfg/yolov1/yolo2.cfg diff --git a/det-yolov4-training/cfg/yolov2-tiny-voc.cfg b/det-yolov4-tmi/cfg/yolov2-tiny-voc.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov2-tiny-voc.cfg rename to det-yolov4-tmi/cfg/yolov2-tiny-voc.cfg diff --git a/det-yolov4-training/cfg/yolov2-tiny.cfg b/det-yolov4-tmi/cfg/yolov2-tiny.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov2-tiny.cfg rename to det-yolov4-tmi/cfg/yolov2-tiny.cfg diff --git a/det-yolov4-training/cfg/yolov2-voc.cfg b/det-yolov4-tmi/cfg/yolov2-voc.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov2-voc.cfg rename to det-yolov4-tmi/cfg/yolov2-voc.cfg diff --git a/det-yolov4-training/cfg/yolov2.cfg b/det-yolov4-tmi/cfg/yolov2.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov2.cfg rename to det-yolov4-tmi/cfg/yolov2.cfg diff --git a/det-yolov4-training/cfg/yolov3-openimages.cfg b/det-yolov4-tmi/cfg/yolov3-openimages.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-openimages.cfg rename to det-yolov4-tmi/cfg/yolov3-openimages.cfg diff --git a/det-yolov4-training/cfg/yolov3-spp.cfg b/det-yolov4-tmi/cfg/yolov3-spp.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-spp.cfg rename to det-yolov4-tmi/cfg/yolov3-spp.cfg diff --git a/det-yolov4-training/cfg/yolov3-tiny-prn.cfg b/det-yolov4-tmi/cfg/yolov3-tiny-prn.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-tiny-prn.cfg rename to det-yolov4-tmi/cfg/yolov3-tiny-prn.cfg diff --git a/det-yolov4-training/cfg/yolov3-tiny.cfg b/det-yolov4-tmi/cfg/yolov3-tiny.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-tiny.cfg rename to det-yolov4-tmi/cfg/yolov3-tiny.cfg diff --git 
a/det-yolov4-training/cfg/yolov3-tiny_3l.cfg b/det-yolov4-tmi/cfg/yolov3-tiny_3l.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-tiny_3l.cfg rename to det-yolov4-tmi/cfg/yolov3-tiny_3l.cfg diff --git a/det-yolov4-training/cfg/yolov3-tiny_obj.cfg b/det-yolov4-tmi/cfg/yolov3-tiny_obj.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-tiny_obj.cfg rename to det-yolov4-tmi/cfg/yolov3-tiny_obj.cfg diff --git a/det-yolov4-training/cfg/yolov3-tiny_occlusion_track.cfg b/det-yolov4-tmi/cfg/yolov3-tiny_occlusion_track.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-tiny_occlusion_track.cfg rename to det-yolov4-tmi/cfg/yolov3-tiny_occlusion_track.cfg diff --git a/det-yolov4-training/cfg/yolov3-tiny_xnor.cfg b/det-yolov4-tmi/cfg/yolov3-tiny_xnor.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-tiny_xnor.cfg rename to det-yolov4-tmi/cfg/yolov3-tiny_xnor.cfg diff --git a/det-yolov4-training/cfg/yolov3-voc.cfg b/det-yolov4-tmi/cfg/yolov3-voc.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-voc.cfg rename to det-yolov4-tmi/cfg/yolov3-voc.cfg diff --git a/det-yolov4-training/cfg/yolov3-voc.yolov3-giou-40.cfg b/det-yolov4-tmi/cfg/yolov3-voc.yolov3-giou-40.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3-voc.yolov3-giou-40.cfg rename to det-yolov4-tmi/cfg/yolov3-voc.yolov3-giou-40.cfg diff --git a/det-yolov4-training/cfg/yolov3.cfg b/det-yolov4-tmi/cfg/yolov3.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3.cfg rename to det-yolov4-tmi/cfg/yolov3.cfg diff --git a/det-yolov4-training/cfg/yolov3.coco-giou-12.cfg b/det-yolov4-tmi/cfg/yolov3.coco-giou-12.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3.coco-giou-12.cfg rename to det-yolov4-tmi/cfg/yolov3.coco-giou-12.cfg diff --git a/det-yolov4-training/cfg/yolov3_5l.cfg b/det-yolov4-tmi/cfg/yolov3_5l.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov3_5l.cfg rename to det-yolov4-tmi/cfg/yolov3_5l.cfg diff --git a/det-yolov4-training/cfg/yolov4-csp-swish.cfg b/det-yolov4-tmi/cfg/yolov4-csp-swish.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-csp-swish.cfg rename to det-yolov4-tmi/cfg/yolov4-csp-swish.cfg diff --git a/det-yolov4-training/cfg/yolov4-csp-x-swish-frozen.cfg b/det-yolov4-tmi/cfg/yolov4-csp-x-swish-frozen.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-csp-x-swish-frozen.cfg rename to det-yolov4-tmi/cfg/yolov4-csp-x-swish-frozen.cfg diff --git a/det-yolov4-training/cfg/yolov4-csp-x-swish.cfg b/det-yolov4-tmi/cfg/yolov4-csp-x-swish.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-csp-x-swish.cfg rename to det-yolov4-tmi/cfg/yolov4-csp-x-swish.cfg diff --git a/det-yolov4-training/cfg/yolov4-csp.cfg b/det-yolov4-tmi/cfg/yolov4-csp.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-csp.cfg rename to det-yolov4-tmi/cfg/yolov4-csp.cfg diff --git a/det-yolov4-training/cfg/yolov4-custom.cfg b/det-yolov4-tmi/cfg/yolov4-custom.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-custom.cfg rename to det-yolov4-tmi/cfg/yolov4-custom.cfg diff --git a/det-yolov4-training/cfg/yolov4-p5-frozen.cfg b/det-yolov4-tmi/cfg/yolov4-p5-frozen.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-p5-frozen.cfg rename to det-yolov4-tmi/cfg/yolov4-p5-frozen.cfg diff --git a/det-yolov4-training/cfg/yolov4-p5.cfg b/det-yolov4-tmi/cfg/yolov4-p5.cfg similarity index 100% rename from 
det-yolov4-training/cfg/yolov4-p5.cfg rename to det-yolov4-tmi/cfg/yolov4-p5.cfg diff --git a/det-yolov4-training/cfg/yolov4-p6.cfg b/det-yolov4-tmi/cfg/yolov4-p6.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-p6.cfg rename to det-yolov4-tmi/cfg/yolov4-p6.cfg diff --git a/det-yolov4-training/cfg/yolov4-sam-mish-csp-reorg-bfm.cfg b/det-yolov4-tmi/cfg/yolov4-sam-mish-csp-reorg-bfm.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-sam-mish-csp-reorg-bfm.cfg rename to det-yolov4-tmi/cfg/yolov4-sam-mish-csp-reorg-bfm.cfg diff --git a/det-yolov4-training/cfg/yolov4-tiny-3l.cfg b/det-yolov4-tmi/cfg/yolov4-tiny-3l.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-tiny-3l.cfg rename to det-yolov4-tmi/cfg/yolov4-tiny-3l.cfg diff --git a/det-yolov4-training/cfg/yolov4-tiny-custom.cfg b/det-yolov4-tmi/cfg/yolov4-tiny-custom.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-tiny-custom.cfg rename to det-yolov4-tmi/cfg/yolov4-tiny-custom.cfg diff --git a/det-yolov4-training/cfg/yolov4-tiny.cfg b/det-yolov4-tmi/cfg/yolov4-tiny.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-tiny.cfg rename to det-yolov4-tmi/cfg/yolov4-tiny.cfg diff --git a/det-yolov4-training/cfg/yolov4-tiny_contrastive.cfg b/det-yolov4-tmi/cfg/yolov4-tiny_contrastive.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4-tiny_contrastive.cfg rename to det-yolov4-tmi/cfg/yolov4-tiny_contrastive.cfg diff --git a/det-yolov4-training/cfg/yolov4.cfg b/det-yolov4-tmi/cfg/yolov4.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4.cfg rename to det-yolov4-tmi/cfg/yolov4.cfg diff --git a/det-yolov4-training/cfg/yolov4_iter1000.cfg b/det-yolov4-tmi/cfg/yolov4_iter1000.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4_iter1000.cfg rename to det-yolov4-tmi/cfg/yolov4_iter1000.cfg diff --git a/det-yolov4-training/cfg/yolov4x-mish.cfg b/det-yolov4-tmi/cfg/yolov4x-mish.cfg similarity index 100% rename from det-yolov4-training/cfg/yolov4x-mish.cfg rename to det-yolov4-tmi/cfg/yolov4x-mish.cfg diff --git a/det-yolov4-training/cmake/Modules/FindCUDNN.cmake b/det-yolov4-tmi/cmake/Modules/FindCUDNN.cmake similarity index 100% rename from det-yolov4-training/cmake/Modules/FindCUDNN.cmake rename to det-yolov4-tmi/cmake/Modules/FindCUDNN.cmake diff --git a/det-yolov4-training/cmake/Modules/FindPThreads4W.cmake b/det-yolov4-tmi/cmake/Modules/FindPThreads4W.cmake similarity index 100% rename from det-yolov4-training/cmake/Modules/FindPThreads4W.cmake rename to det-yolov4-tmi/cmake/Modules/FindPThreads4W.cmake diff --git a/det-yolov4-training/cmake/Modules/FindStb.cmake b/det-yolov4-tmi/cmake/Modules/FindStb.cmake similarity index 100% rename from det-yolov4-training/cmake/Modules/FindStb.cmake rename to det-yolov4-tmi/cmake/Modules/FindStb.cmake diff --git a/det-yolov4-training/config_and_train.py b/det-yolov4-tmi/config_and_train.py similarity index 100% rename from det-yolov4-training/config_and_train.py rename to det-yolov4-tmi/config_and_train.py diff --git a/det-yolov4-training/convert_label_ark2txt.py b/det-yolov4-tmi/convert_label_ark2txt.py similarity index 97% rename from det-yolov4-training/convert_label_ark2txt.py rename to det-yolov4-tmi/convert_label_ark2txt.py index 1043b53..ae54b63 100755 --- a/det-yolov4-training/convert_label_ark2txt.py +++ b/det-yolov4-tmi/convert_label_ark2txt.py @@ -21,9 +21,10 @@ def _convert_annotations(index_file_path: str, dst_annotations_dir: str) -> None files = 
f.readlines()
     files = [each.strip() for each in files]
 
+    N = len(files)
     for i, each_img_anno_path in enumerate(files):
         if i % 1000 == 0:
-            print(f"converted {i} image annotations")
+            print(f"converted {i}/{N} image annotations")
 
         # each_imgpath: asset path
         # each_txtfile: annotation path
diff --git a/det-yolov4-training/convert_model_darknet2mxnet_yolov4.py b/det-yolov4-tmi/convert_model_darknet2mxnet_yolov4.py
similarity index 100%
rename from det-yolov4-training/convert_model_darknet2mxnet_yolov4.py
rename to det-yolov4-tmi/convert_model_darknet2mxnet_yolov4.py
diff --git a/det-yolov4-training/counters_per_class.txt b/det-yolov4-tmi/counters_per_class.txt
similarity index 100%
rename from det-yolov4-training/counters_per_class.txt
rename to det-yolov4-tmi/counters_per_class.txt
diff --git a/det-yolov4-training/Dockerfile b/det-yolov4-tmi/cuda101.dockerfile
similarity index 82%
rename from det-yolov4-training/Dockerfile
rename to det-yolov4-tmi/cuda101.dockerfile
index 61ce1f6..5a5a2b5 100644
--- a/det-yolov4-training/Dockerfile
+++ b/det-yolov4-tmi/cuda101.dockerfile
@@ -1,5 +1,8 @@
 FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
 ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple
+
+ENV PYTHONPATH=.
+
 WORKDIR /darknet
 RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list
 RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update
@@ -12,11 +15,12 @@ RUN wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_o
 RUN rm /usr/bin/python3
 RUN ln -s /usr/bin/python3.7 /usr/bin/python3
 RUN python3 get-pip.py
-RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six
+RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six scipy tqdm
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt-get update && apt-get install -y libopencv-dev
 COPY . /darknet
-RUN cp /darknet/make_train_test_darknet.sh /usr/bin/start.sh
-RUN mkdir /img-man && cp /darknet/training-template.yaml /img-man/training-template.yaml
 RUN make -j
+
+RUN mkdir /img-man && cp /darknet/training-template.yaml /img-man/training-template.yaml && cp /darknet/mining/*-template.yaml /img-man
+RUN echo "python3 /darknet/start.py" > /usr/bin/start.sh
 CMD bash /usr/bin/start.sh
diff --git a/det-yolov4-training/cuda112.dockerfile b/det-yolov4-tmi/cuda112.dockerfile
similarity index 82%
rename from det-yolov4-training/cuda112.dockerfile
rename to det-yolov4-tmi/cuda112.dockerfile
index 3e6884b..aac49de 100644
--- a/det-yolov4-training/cuda112.dockerfile
+++ b/det-yolov4-tmi/cuda112.dockerfile
@@ -1,5 +1,8 @@
 FROM nvidia/cuda:11.2.1-cudnn8-devel-ubuntu18.04
 ARG PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple
+
+ENV PYTHONPATH=.
+
 WORKDIR /darknet
 RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list
 RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && apt-get update
@@ -12,12 +15,13 @@ RUN wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_o
 RUN rm /usr/bin/python3
 RUN ln -s /usr/bin/python3.7 /usr/bin/python3
 RUN python3 get-pip.py
-RUN pip3 install -i ${PIP_SOURCE} mxnet-cu112==1.9.1 numpy opencv-python pyyaml watchdog tensorboardX six
+RUN pip3 install -i ${PIP_SOURCE} mxnet-cu112==1.9.1 numpy opencv-python pyyaml watchdog tensorboardX six scipy tqdm
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt-get update && apt-get install -y libopencv-dev
 COPY .
/darknet -RUN cp /darknet/make_train_test_darknet.sh /usr/bin/start.sh -RUN mkdir /img-man && cp /darknet/training-template.yaml /img-man/training-template.yaml RUN make -j + +RUN mkdir /img-man && cp /darknet/training-template.yaml /img-man/training-template.yaml && cp /darknet/mining/*-template.yaml /img-man +RUN echo "python3 /darknet/start.py" > /usr/bin/start.sh CMD bash /usr/bin/start.sh diff --git a/det-yolov4-training/darknet.py b/det-yolov4-tmi/darknet.py similarity index 100% rename from det-yolov4-training/darknet.py rename to det-yolov4-tmi/darknet.py diff --git a/det-yolov4-training/darknet_images.py b/det-yolov4-tmi/darknet_images.py similarity index 100% rename from det-yolov4-training/darknet_images.py rename to det-yolov4-tmi/darknet_images.py diff --git a/det-yolov4-training/darknet_video.py b/det-yolov4-tmi/darknet_video.py similarity index 100% rename from det-yolov4-training/darknet_video.py rename to det-yolov4-tmi/darknet_video.py diff --git a/det-yolov4-training/data/9k.tree b/det-yolov4-tmi/data/9k.tree similarity index 100% rename from det-yolov4-training/data/9k.tree rename to det-yolov4-tmi/data/9k.tree diff --git a/det-yolov4-training/data/coco.names b/det-yolov4-tmi/data/coco.names similarity index 100% rename from det-yolov4-training/data/coco.names rename to det-yolov4-tmi/data/coco.names diff --git a/det-yolov4-training/data/coco9k.map b/det-yolov4-tmi/data/coco9k.map similarity index 100% rename from det-yolov4-training/data/coco9k.map rename to det-yolov4-tmi/data/coco9k.map diff --git a/det-yolov4-training/data/goal.txt b/det-yolov4-tmi/data/goal.txt similarity index 100% rename from det-yolov4-training/data/goal.txt rename to det-yolov4-tmi/data/goal.txt diff --git a/det-yolov4-training/data/imagenet.labels.list b/det-yolov4-tmi/data/imagenet.labels.list similarity index 100% rename from det-yolov4-training/data/imagenet.labels.list rename to det-yolov4-tmi/data/imagenet.labels.list diff --git a/det-yolov4-training/data/imagenet.shortnames.list b/det-yolov4-tmi/data/imagenet.shortnames.list similarity index 100% rename from det-yolov4-training/data/imagenet.shortnames.list rename to det-yolov4-tmi/data/imagenet.shortnames.list diff --git a/det-yolov4-training/data/labels/make_labels.py b/det-yolov4-tmi/data/labels/make_labels.py similarity index 100% rename from det-yolov4-training/data/labels/make_labels.py rename to det-yolov4-tmi/data/labels/make_labels.py diff --git a/det-yolov4-training/data/openimages.names b/det-yolov4-tmi/data/openimages.names similarity index 100% rename from det-yolov4-training/data/openimages.names rename to det-yolov4-tmi/data/openimages.names diff --git a/det-yolov4-training/data/voc.names b/det-yolov4-tmi/data/voc.names similarity index 100% rename from det-yolov4-training/data/voc.names rename to det-yolov4-tmi/data/voc.names diff --git a/det-yolov4-training/image_yolov3.sh b/det-yolov4-tmi/image_yolov3.sh similarity index 100% rename from det-yolov4-training/image_yolov3.sh rename to det-yolov4-tmi/image_yolov3.sh diff --git a/det-yolov4-training/image_yolov4.sh b/det-yolov4-tmi/image_yolov4.sh similarity index 100% rename from det-yolov4-training/image_yolov4.sh rename to det-yolov4-tmi/image_yolov4.sh diff --git a/det-yolov4-training/img.txt b/det-yolov4-tmi/img.txt similarity index 100% rename from det-yolov4-training/img.txt rename to det-yolov4-tmi/img.txt diff --git a/det-yolov4-training/include/darknet.h b/det-yolov4-tmi/include/darknet.h similarity index 100% rename from det-yolov4-training/include/darknet.h 
rename to det-yolov4-tmi/include/darknet.h diff --git a/det-yolov4-training/include/yolo_v2_class.hpp b/det-yolov4-tmi/include/yolo_v2_class.hpp similarity index 100% rename from det-yolov4-training/include/yolo_v2_class.hpp rename to det-yolov4-tmi/include/yolo_v2_class.hpp diff --git a/det-yolov4-training/json_mjpeg_streams.sh b/det-yolov4-tmi/json_mjpeg_streams.sh similarity index 100% rename from det-yolov4-training/json_mjpeg_streams.sh rename to det-yolov4-tmi/json_mjpeg_streams.sh diff --git a/det-yolov4-training/make_train_test_darknet.sh b/det-yolov4-tmi/make_train_test_darknet.sh similarity index 100% rename from det-yolov4-training/make_train_test_darknet.sh rename to det-yolov4-tmi/make_train_test_darknet.sh diff --git a/det-yolov4-mining/.dockerignore b/det-yolov4-tmi/mining/.dockerignore similarity index 100% rename from det-yolov4-mining/.dockerignore rename to det-yolov4-tmi/mining/.dockerignore diff --git a/det-yolov4-mining/README.md b/det-yolov4-tmi/mining/README.md similarity index 100% rename from det-yolov4-mining/README.md rename to det-yolov4-tmi/mining/README.md diff --git a/det-yolov4-mining/active_learning/__init__.py b/det-yolov4-tmi/mining/active_learning/__init__.py similarity index 100% rename from det-yolov4-mining/active_learning/__init__.py rename to det-yolov4-tmi/mining/active_learning/__init__.py diff --git a/det-yolov4-mining/active_learning/apis/__init__.py b/det-yolov4-tmi/mining/active_learning/apis/__init__.py similarity index 100% rename from det-yolov4-mining/active_learning/apis/__init__.py rename to det-yolov4-tmi/mining/active_learning/apis/__init__.py diff --git a/det-yolov4-mining/active_learning/apis/al_api.py b/det-yolov4-tmi/mining/active_learning/apis/al_api.py similarity index 100% rename from det-yolov4-mining/active_learning/apis/al_api.py rename to det-yolov4-tmi/mining/active_learning/apis/al_api.py diff --git a/det-yolov4-mining/active_learning/apis/docker_api.py b/det-yolov4-tmi/mining/active_learning/apis/docker_api.py similarity index 100% rename from det-yolov4-mining/active_learning/apis/docker_api.py rename to det-yolov4-tmi/mining/active_learning/apis/docker_api.py diff --git a/det-yolov4-mining/active_learning/dataset/__init__.py b/det-yolov4-tmi/mining/active_learning/dataset/__init__.py similarity index 100% rename from det-yolov4-mining/active_learning/dataset/__init__.py rename to det-yolov4-tmi/mining/active_learning/dataset/__init__.py diff --git a/det-yolov4-mining/active_learning/dataset/datareader.py b/det-yolov4-tmi/mining/active_learning/dataset/datareader.py similarity index 100% rename from det-yolov4-mining/active_learning/dataset/datareader.py rename to det-yolov4-tmi/mining/active_learning/dataset/datareader.py diff --git a/det-yolov4-mining/active_learning/dataset/labeled_dataset.py b/det-yolov4-tmi/mining/active_learning/dataset/labeled_dataset.py similarity index 100% rename from det-yolov4-mining/active_learning/dataset/labeled_dataset.py rename to det-yolov4-tmi/mining/active_learning/dataset/labeled_dataset.py diff --git a/det-yolov4-mining/active_learning/dataset/unlabeled_dataset.py b/det-yolov4-tmi/mining/active_learning/dataset/unlabeled_dataset.py similarity index 100% rename from det-yolov4-mining/active_learning/dataset/unlabeled_dataset.py rename to det-yolov4-tmi/mining/active_learning/dataset/unlabeled_dataset.py diff --git a/det-yolov4-mining/active_learning/model_inference/__init__.py b/det-yolov4-tmi/mining/active_learning/model_inference/__init__.py similarity index 100% rename from 
det-yolov4-mining/active_learning/model_inference/__init__.py rename to det-yolov4-tmi/mining/active_learning/model_inference/__init__.py diff --git a/det-yolov4-mining/active_learning/model_inference/centernet.py b/det-yolov4-tmi/mining/active_learning/model_inference/centernet.py similarity index 100% rename from det-yolov4-mining/active_learning/model_inference/centernet.py rename to det-yolov4-tmi/mining/active_learning/model_inference/centernet.py diff --git a/det-yolov4-mining/active_learning/model_inference/yolo_models.py b/det-yolov4-tmi/mining/active_learning/model_inference/yolo_models.py similarity index 100% rename from det-yolov4-mining/active_learning/model_inference/yolo_models.py rename to det-yolov4-tmi/mining/active_learning/model_inference/yolo_models.py diff --git a/det-yolov4-mining/active_learning/strategy/__init__.py b/det-yolov4-tmi/mining/active_learning/strategy/__init__.py similarity index 100% rename from det-yolov4-mining/active_learning/strategy/__init__.py rename to det-yolov4-tmi/mining/active_learning/strategy/__init__.py diff --git a/det-yolov4-mining/active_learning/strategy/aldd.py b/det-yolov4-tmi/mining/active_learning/strategy/aldd.py similarity index 100% rename from det-yolov4-mining/active_learning/strategy/aldd.py rename to det-yolov4-tmi/mining/active_learning/strategy/aldd.py diff --git a/det-yolov4-mining/active_learning/strategy/aldd_yolo.py b/det-yolov4-tmi/mining/active_learning/strategy/aldd_yolo.py similarity index 100% rename from det-yolov4-mining/active_learning/strategy/aldd_yolo.py rename to det-yolov4-tmi/mining/active_learning/strategy/aldd_yolo.py diff --git a/det-yolov4-mining/active_learning/strategy/cald.py b/det-yolov4-tmi/mining/active_learning/strategy/cald.py similarity index 100% rename from det-yolov4-mining/active_learning/strategy/cald.py rename to det-yolov4-tmi/mining/active_learning/strategy/cald.py diff --git a/det-yolov4-mining/active_learning/strategy/data_augment.py b/det-yolov4-tmi/mining/active_learning/strategy/data_augment.py similarity index 100% rename from det-yolov4-mining/active_learning/strategy/data_augment.py rename to det-yolov4-tmi/mining/active_learning/strategy/data_augment.py diff --git a/det-yolov4-mining/active_learning/strategy/random_strategy.py b/det-yolov4-tmi/mining/active_learning/strategy/random_strategy.py similarity index 100% rename from det-yolov4-mining/active_learning/strategy/random_strategy.py rename to det-yolov4-tmi/mining/active_learning/strategy/random_strategy.py diff --git a/det-yolov4-mining/active_learning/utils/__init__.py b/det-yolov4-tmi/mining/active_learning/utils/__init__.py similarity index 100% rename from det-yolov4-mining/active_learning/utils/__init__.py rename to det-yolov4-tmi/mining/active_learning/utils/__init__.py diff --git a/det-yolov4-mining/active_learning/utils/al_log.py b/det-yolov4-tmi/mining/active_learning/utils/al_log.py similarity index 100% rename from det-yolov4-mining/active_learning/utils/al_log.py rename to det-yolov4-tmi/mining/active_learning/utils/al_log.py diff --git a/det-yolov4-mining/active_learning/utils/operator.py b/det-yolov4-tmi/mining/active_learning/utils/operator.py similarity index 100% rename from det-yolov4-mining/active_learning/utils/operator.py rename to det-yolov4-tmi/mining/active_learning/utils/operator.py diff --git a/det-yolov4-mining/al_main.py b/det-yolov4-tmi/mining/al_main.py similarity index 100% rename from det-yolov4-mining/al_main.py rename to det-yolov4-tmi/mining/al_main.py diff --git 
a/det-yolov4-mining/combined_class.txt b/det-yolov4-tmi/mining/combined_class.txt
similarity index 100%
rename from det-yolov4-mining/combined_class.txt
rename to det-yolov4-tmi/mining/combined_class.txt
diff --git a/det-yolov4-mining/docker_main.py b/det-yolov4-tmi/mining/docker_main.py
similarity index 88%
rename from det-yolov4-mining/docker_main.py
rename to det-yolov4-tmi/mining/docker_main.py
index 3eb4641..5f65377 100644
--- a/det-yolov4-mining/docker_main.py
+++ b/det-yolov4-tmi/mining/docker_main.py
@@ -9,8 +9,8 @@
 import write_result
 
 
-def _load_config() -> dict:
-    with open("/in/config.yaml", "r", encoding='utf8') as f:
+def _load_config(config_file) -> dict:
+    with open(config_file, "r", encoding='utf8') as f:
         config = yaml.safe_load(f)
 
     # set default task id
@@ -34,16 +34,17 @@
 
 if __name__ == '__main__':
-    config = _load_config()
+    config = _load_config("/in/config.yaml")
 
-    run_infer = int(config['run_infer'])
-    run_mining = int(config['run_mining'])
+    env_config = _load_config("/in/env.yaml")
+    run_infer = env_config['run_infer']=='true'
+    run_mining = env_config['run_mining']=='true'
 
     if not run_infer and not run_mining:
         raise ValueError('both run_infer and run_mining set to 0, abort')
 
-    monitor_process.run_mining = run_mining
-    monitor_process.run_infer = run_infer
+    monitor_process.run_mining = int(run_mining)
+    monitor_process.run_infer = int(run_infer)
 
     log_writer = LogWriter(monitor_path="/out/monitor.txt",
                            monitor_pure_path="/out/monitor-log.txt",
diff --git a/det-yolov4-mining/docker_readme.md b/det-yolov4-tmi/mining/docker_readme.md
similarity index 100%
rename from det-yolov4-mining/docker_readme.md
rename to det-yolov4-tmi/mining/docker_readme.md
diff --git a/det-yolov4-mining/infer-template.yaml b/det-yolov4-tmi/mining/infer-template.yaml
similarity index 100%
rename from det-yolov4-mining/infer-template.yaml
rename to det-yolov4-tmi/mining/infer-template.yaml
diff --git a/det-yolov4-mining/mining-template.yaml b/det-yolov4-tmi/mining/mining-template.yaml
similarity index 95%
rename from det-yolov4-mining/mining-template.yaml
rename to det-yolov4-tmi/mining/mining-template.yaml
index e02770f..aeee009 100644
--- a/det-yolov4-mining/mining-template.yaml
+++ b/det-yolov4-tmi/mining/mining-template.yaml
@@ -13,7 +13,7 @@ model_type: detection
 strategy: aldd_yolo
 image_height: 608
 image_width: 608
-batch_size: 16
+batch_size: 4
 anchors: '12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401'
 confidence_thresh: 0.1
 nms_thresh: 0.45
@@ -23,4 +23,4 @@ max_boxes: 50
 # model_params_path: []
 # task_id: cycle-node-mined-0
 # class_names:
-# - expose_rubbish
\ No newline at end of file
+# - expose_rubbish
diff --git a/det-yolov4-mining/monitor_process.py b/det-yolov4-tmi/mining/monitor_process.py
similarity index 100%
rename from det-yolov4-mining/monitor_process.py
rename to det-yolov4-tmi/mining/monitor_process.py
diff --git a/det-yolov4-mining/start.sh b/det-yolov4-tmi/mining/start.sh
similarity index 100%
rename from det-yolov4-mining/start.sh
rename to det-yolov4-tmi/mining/start.sh
diff --git a/det-yolov4-mining/test_api.py b/det-yolov4-tmi/mining/test_api.py
similarity index 100%
rename from det-yolov4-mining/test_api.py
rename to det-yolov4-tmi/mining/test_api.py
diff --git a/det-yolov4-mining/test_centernet.py b/det-yolov4-tmi/mining/test_centernet.py
similarity index 100%
rename from det-yolov4-mining/test_centernet.py
rename to det-yolov4-tmi/mining/test_centernet.py
diff --git
a/det-yolov4-mining/tools/al_strategsy_union.py b/det-yolov4-tmi/mining/tools/al_strategsy_union.py similarity index 100% rename from det-yolov4-mining/tools/al_strategsy_union.py rename to det-yolov4-tmi/mining/tools/al_strategsy_union.py diff --git a/det-yolov4-mining/tools/imagenet_hard_negative.py b/det-yolov4-tmi/mining/tools/imagenet_hard_negative.py similarity index 100% rename from det-yolov4-mining/tools/imagenet_hard_negative.py rename to det-yolov4-tmi/mining/tools/imagenet_hard_negative.py diff --git a/det-yolov4-mining/tools/plot_dataset_class_hist.py b/det-yolov4-tmi/mining/tools/plot_dataset_class_hist.py similarity index 100% rename from det-yolov4-mining/tools/plot_dataset_class_hist.py rename to det-yolov4-tmi/mining/tools/plot_dataset_class_hist.py diff --git a/det-yolov4-mining/tools/visualize_aldd.py b/det-yolov4-tmi/mining/tools/visualize_aldd.py similarity index 100% rename from det-yolov4-mining/tools/visualize_aldd.py rename to det-yolov4-tmi/mining/tools/visualize_aldd.py diff --git a/det-yolov4-mining/tools/visualize_cald.py b/det-yolov4-tmi/mining/tools/visualize_cald.py similarity index 100% rename from det-yolov4-mining/tools/visualize_cald.py rename to det-yolov4-tmi/mining/tools/visualize_cald.py diff --git a/det-yolov4-mining/write_result.py b/det-yolov4-tmi/mining/write_result.py similarity index 100% rename from det-yolov4-mining/write_result.py rename to det-yolov4-tmi/mining/write_result.py diff --git a/det-yolov4-training/net_cam_v3.sh b/det-yolov4-tmi/net_cam_v3.sh similarity index 100% rename from det-yolov4-training/net_cam_v3.sh rename to det-yolov4-tmi/net_cam_v3.sh diff --git a/det-yolov4-training/net_cam_v4.sh b/det-yolov4-tmi/net_cam_v4.sh similarity index 100% rename from det-yolov4-training/net_cam_v4.sh rename to det-yolov4-tmi/net_cam_v4.sh diff --git a/det-yolov4-training/src/.editorconfig b/det-yolov4-tmi/src/.editorconfig similarity index 100% rename from det-yolov4-training/src/.editorconfig rename to det-yolov4-tmi/src/.editorconfig diff --git a/det-yolov4-training/src/activation_kernels.cu b/det-yolov4-tmi/src/activation_kernels.cu similarity index 100% rename from det-yolov4-training/src/activation_kernels.cu rename to det-yolov4-tmi/src/activation_kernels.cu diff --git a/det-yolov4-training/src/activation_layer.c b/det-yolov4-tmi/src/activation_layer.c similarity index 100% rename from det-yolov4-training/src/activation_layer.c rename to det-yolov4-tmi/src/activation_layer.c diff --git a/det-yolov4-training/src/activation_layer.h b/det-yolov4-tmi/src/activation_layer.h similarity index 100% rename from det-yolov4-training/src/activation_layer.h rename to det-yolov4-tmi/src/activation_layer.h diff --git a/det-yolov4-training/src/activations.c b/det-yolov4-tmi/src/activations.c similarity index 100% rename from det-yolov4-training/src/activations.c rename to det-yolov4-tmi/src/activations.c diff --git a/det-yolov4-training/src/activations.h b/det-yolov4-tmi/src/activations.h similarity index 100% rename from det-yolov4-training/src/activations.h rename to det-yolov4-tmi/src/activations.h diff --git a/det-yolov4-training/src/art.c b/det-yolov4-tmi/src/art.c similarity index 100% rename from det-yolov4-training/src/art.c rename to det-yolov4-tmi/src/art.c diff --git a/det-yolov4-training/src/avgpool_layer.c b/det-yolov4-tmi/src/avgpool_layer.c similarity index 100% rename from det-yolov4-training/src/avgpool_layer.c rename to det-yolov4-tmi/src/avgpool_layer.c diff --git a/det-yolov4-training/src/avgpool_layer.h 
b/det-yolov4-tmi/src/avgpool_layer.h similarity index 100% rename from det-yolov4-training/src/avgpool_layer.h rename to det-yolov4-tmi/src/avgpool_layer.h diff --git a/det-yolov4-training/src/avgpool_layer_kernels.cu b/det-yolov4-tmi/src/avgpool_layer_kernels.cu similarity index 100% rename from det-yolov4-training/src/avgpool_layer_kernels.cu rename to det-yolov4-tmi/src/avgpool_layer_kernels.cu diff --git a/det-yolov4-training/src/batchnorm_layer.c b/det-yolov4-tmi/src/batchnorm_layer.c similarity index 100% rename from det-yolov4-training/src/batchnorm_layer.c rename to det-yolov4-tmi/src/batchnorm_layer.c diff --git a/det-yolov4-training/src/batchnorm_layer.h b/det-yolov4-tmi/src/batchnorm_layer.h similarity index 100% rename from det-yolov4-training/src/batchnorm_layer.h rename to det-yolov4-tmi/src/batchnorm_layer.h diff --git a/det-yolov4-training/src/blas.c b/det-yolov4-tmi/src/blas.c similarity index 100% rename from det-yolov4-training/src/blas.c rename to det-yolov4-tmi/src/blas.c diff --git a/det-yolov4-training/src/blas.h b/det-yolov4-tmi/src/blas.h similarity index 100% rename from det-yolov4-training/src/blas.h rename to det-yolov4-tmi/src/blas.h diff --git a/det-yolov4-training/src/blas_kernels.cu b/det-yolov4-tmi/src/blas_kernels.cu similarity index 100% rename from det-yolov4-training/src/blas_kernels.cu rename to det-yolov4-tmi/src/blas_kernels.cu diff --git a/det-yolov4-training/src/box.c b/det-yolov4-tmi/src/box.c similarity index 100% rename from det-yolov4-training/src/box.c rename to det-yolov4-tmi/src/box.c diff --git a/det-yolov4-training/src/box.h b/det-yolov4-tmi/src/box.h similarity index 100% rename from det-yolov4-training/src/box.h rename to det-yolov4-tmi/src/box.h diff --git a/det-yolov4-training/src/captcha.c b/det-yolov4-tmi/src/captcha.c similarity index 100% rename from det-yolov4-training/src/captcha.c rename to det-yolov4-tmi/src/captcha.c diff --git a/det-yolov4-training/src/cifar.c b/det-yolov4-tmi/src/cifar.c similarity index 100% rename from det-yolov4-training/src/cifar.c rename to det-yolov4-tmi/src/cifar.c diff --git a/det-yolov4-training/src/classifier.c b/det-yolov4-tmi/src/classifier.c similarity index 100% rename from det-yolov4-training/src/classifier.c rename to det-yolov4-tmi/src/classifier.c diff --git a/det-yolov4-training/src/classifier.h b/det-yolov4-tmi/src/classifier.h similarity index 100% rename from det-yolov4-training/src/classifier.h rename to det-yolov4-tmi/src/classifier.h diff --git a/det-yolov4-training/src/coco.c b/det-yolov4-tmi/src/coco.c similarity index 100% rename from det-yolov4-training/src/coco.c rename to det-yolov4-tmi/src/coco.c diff --git a/det-yolov4-training/src/col2im.c b/det-yolov4-tmi/src/col2im.c similarity index 100% rename from det-yolov4-training/src/col2im.c rename to det-yolov4-tmi/src/col2im.c diff --git a/det-yolov4-training/src/col2im.h b/det-yolov4-tmi/src/col2im.h similarity index 100% rename from det-yolov4-training/src/col2im.h rename to det-yolov4-tmi/src/col2im.h diff --git a/det-yolov4-training/src/col2im_kernels.cu b/det-yolov4-tmi/src/col2im_kernels.cu similarity index 100% rename from det-yolov4-training/src/col2im_kernels.cu rename to det-yolov4-tmi/src/col2im_kernels.cu diff --git a/det-yolov4-training/src/compare.c b/det-yolov4-tmi/src/compare.c similarity index 100% rename from det-yolov4-training/src/compare.c rename to det-yolov4-tmi/src/compare.c diff --git a/det-yolov4-training/src/connected_layer.c b/det-yolov4-tmi/src/connected_layer.c similarity index 100% rename from 
det-yolov4-training/src/connected_layer.c rename to det-yolov4-tmi/src/connected_layer.c diff --git a/det-yolov4-training/src/connected_layer.h b/det-yolov4-tmi/src/connected_layer.h similarity index 100% rename from det-yolov4-training/src/connected_layer.h rename to det-yolov4-tmi/src/connected_layer.h diff --git a/det-yolov4-training/src/conv_lstm_layer.c b/det-yolov4-tmi/src/conv_lstm_layer.c similarity index 100% rename from det-yolov4-training/src/conv_lstm_layer.c rename to det-yolov4-tmi/src/conv_lstm_layer.c diff --git a/det-yolov4-training/src/conv_lstm_layer.h b/det-yolov4-tmi/src/conv_lstm_layer.h similarity index 100% rename from det-yolov4-training/src/conv_lstm_layer.h rename to det-yolov4-tmi/src/conv_lstm_layer.h diff --git a/det-yolov4-training/src/convolutional_kernels.cu b/det-yolov4-tmi/src/convolutional_kernels.cu similarity index 100% rename from det-yolov4-training/src/convolutional_kernels.cu rename to det-yolov4-tmi/src/convolutional_kernels.cu diff --git a/det-yolov4-training/src/convolutional_layer.c b/det-yolov4-tmi/src/convolutional_layer.c similarity index 100% rename from det-yolov4-training/src/convolutional_layer.c rename to det-yolov4-tmi/src/convolutional_layer.c diff --git a/det-yolov4-training/src/convolutional_layer.h b/det-yolov4-tmi/src/convolutional_layer.h similarity index 100% rename from det-yolov4-training/src/convolutional_layer.h rename to det-yolov4-tmi/src/convolutional_layer.h diff --git a/det-yolov4-training/src/cost_layer.c b/det-yolov4-tmi/src/cost_layer.c similarity index 100% rename from det-yolov4-training/src/cost_layer.c rename to det-yolov4-tmi/src/cost_layer.c diff --git a/det-yolov4-training/src/cost_layer.h b/det-yolov4-tmi/src/cost_layer.h similarity index 100% rename from det-yolov4-training/src/cost_layer.h rename to det-yolov4-tmi/src/cost_layer.h diff --git a/det-yolov4-training/src/cpu_gemm.c b/det-yolov4-tmi/src/cpu_gemm.c similarity index 100% rename from det-yolov4-training/src/cpu_gemm.c rename to det-yolov4-tmi/src/cpu_gemm.c diff --git a/det-yolov4-training/src/crnn_layer.c b/det-yolov4-tmi/src/crnn_layer.c similarity index 100% rename from det-yolov4-training/src/crnn_layer.c rename to det-yolov4-tmi/src/crnn_layer.c diff --git a/det-yolov4-training/src/crnn_layer.h b/det-yolov4-tmi/src/crnn_layer.h similarity index 100% rename from det-yolov4-training/src/crnn_layer.h rename to det-yolov4-tmi/src/crnn_layer.h diff --git a/det-yolov4-training/src/crop_layer.c b/det-yolov4-tmi/src/crop_layer.c similarity index 100% rename from det-yolov4-training/src/crop_layer.c rename to det-yolov4-tmi/src/crop_layer.c diff --git a/det-yolov4-training/src/crop_layer.h b/det-yolov4-tmi/src/crop_layer.h similarity index 100% rename from det-yolov4-training/src/crop_layer.h rename to det-yolov4-tmi/src/crop_layer.h diff --git a/det-yolov4-training/src/crop_layer_kernels.cu b/det-yolov4-tmi/src/crop_layer_kernels.cu similarity index 100% rename from det-yolov4-training/src/crop_layer_kernels.cu rename to det-yolov4-tmi/src/crop_layer_kernels.cu diff --git a/det-yolov4-training/src/csharp/CMakeLists.txt b/det-yolov4-tmi/src/csharp/CMakeLists.txt similarity index 100% rename from det-yolov4-training/src/csharp/CMakeLists.txt rename to det-yolov4-tmi/src/csharp/CMakeLists.txt diff --git a/det-yolov4-training/src/csharp/YoloCSharpWrapper.cs b/det-yolov4-tmi/src/csharp/YoloCSharpWrapper.cs similarity index 100% rename from det-yolov4-training/src/csharp/YoloCSharpWrapper.cs rename to det-yolov4-tmi/src/csharp/YoloCSharpWrapper.cs diff 
--git a/det-yolov4-training/src/dark_cuda.c b/det-yolov4-tmi/src/dark_cuda.c similarity index 100% rename from det-yolov4-training/src/dark_cuda.c rename to det-yolov4-tmi/src/dark_cuda.c diff --git a/det-yolov4-training/src/dark_cuda.h b/det-yolov4-tmi/src/dark_cuda.h similarity index 100% rename from det-yolov4-training/src/dark_cuda.h rename to det-yolov4-tmi/src/dark_cuda.h diff --git a/det-yolov4-training/src/darknet.c b/det-yolov4-tmi/src/darknet.c similarity index 100% rename from det-yolov4-training/src/darknet.c rename to det-yolov4-tmi/src/darknet.c diff --git a/det-yolov4-training/src/darkunistd.h b/det-yolov4-tmi/src/darkunistd.h similarity index 100% rename from det-yolov4-training/src/darkunistd.h rename to det-yolov4-tmi/src/darkunistd.h diff --git a/det-yolov4-training/src/data.c b/det-yolov4-tmi/src/data.c similarity index 100% rename from det-yolov4-training/src/data.c rename to det-yolov4-tmi/src/data.c diff --git a/det-yolov4-training/src/data.h b/det-yolov4-tmi/src/data.h similarity index 100% rename from det-yolov4-training/src/data.h rename to det-yolov4-tmi/src/data.h diff --git a/det-yolov4-training/src/deconvolutional_kernels.cu b/det-yolov4-tmi/src/deconvolutional_kernels.cu similarity index 100% rename from det-yolov4-training/src/deconvolutional_kernels.cu rename to det-yolov4-tmi/src/deconvolutional_kernels.cu diff --git a/det-yolov4-training/src/deconvolutional_layer.c b/det-yolov4-tmi/src/deconvolutional_layer.c similarity index 100% rename from det-yolov4-training/src/deconvolutional_layer.c rename to det-yolov4-tmi/src/deconvolutional_layer.c diff --git a/det-yolov4-training/src/deconvolutional_layer.h b/det-yolov4-tmi/src/deconvolutional_layer.h similarity index 100% rename from det-yolov4-training/src/deconvolutional_layer.h rename to det-yolov4-tmi/src/deconvolutional_layer.h diff --git a/det-yolov4-training/src/demo.c b/det-yolov4-tmi/src/demo.c similarity index 100% rename from det-yolov4-training/src/demo.c rename to det-yolov4-tmi/src/demo.c diff --git a/det-yolov4-training/src/demo.h b/det-yolov4-tmi/src/demo.h similarity index 100% rename from det-yolov4-training/src/demo.h rename to det-yolov4-tmi/src/demo.h diff --git a/det-yolov4-training/src/detection_layer.c b/det-yolov4-tmi/src/detection_layer.c similarity index 100% rename from det-yolov4-training/src/detection_layer.c rename to det-yolov4-tmi/src/detection_layer.c diff --git a/det-yolov4-training/src/detection_layer.h b/det-yolov4-tmi/src/detection_layer.h similarity index 100% rename from det-yolov4-training/src/detection_layer.h rename to det-yolov4-tmi/src/detection_layer.h diff --git a/det-yolov4-training/src/detector.c b/det-yolov4-tmi/src/detector.c similarity index 100% rename from det-yolov4-training/src/detector.c rename to det-yolov4-tmi/src/detector.c diff --git a/det-yolov4-training/src/dice.c b/det-yolov4-tmi/src/dice.c similarity index 100% rename from det-yolov4-training/src/dice.c rename to det-yolov4-tmi/src/dice.c diff --git a/det-yolov4-training/src/dropout_layer.c b/det-yolov4-tmi/src/dropout_layer.c similarity index 100% rename from det-yolov4-training/src/dropout_layer.c rename to det-yolov4-tmi/src/dropout_layer.c diff --git a/det-yolov4-training/src/dropout_layer.h b/det-yolov4-tmi/src/dropout_layer.h similarity index 100% rename from det-yolov4-training/src/dropout_layer.h rename to det-yolov4-tmi/src/dropout_layer.h diff --git a/det-yolov4-training/src/dropout_layer_kernels.cu b/det-yolov4-tmi/src/dropout_layer_kernels.cu similarity index 100% rename from 
det-yolov4-training/src/dropout_layer_kernels.cu rename to det-yolov4-tmi/src/dropout_layer_kernels.cu diff --git a/det-yolov4-training/src/gaussian_yolo_layer.c b/det-yolov4-tmi/src/gaussian_yolo_layer.c similarity index 100% rename from det-yolov4-training/src/gaussian_yolo_layer.c rename to det-yolov4-tmi/src/gaussian_yolo_layer.c diff --git a/det-yolov4-training/src/gaussian_yolo_layer.h b/det-yolov4-tmi/src/gaussian_yolo_layer.h similarity index 100% rename from det-yolov4-training/src/gaussian_yolo_layer.h rename to det-yolov4-tmi/src/gaussian_yolo_layer.h diff --git a/det-yolov4-training/src/gemm.c b/det-yolov4-tmi/src/gemm.c similarity index 100% rename from det-yolov4-training/src/gemm.c rename to det-yolov4-tmi/src/gemm.c diff --git a/det-yolov4-training/src/gemm.h b/det-yolov4-tmi/src/gemm.h similarity index 100% rename from det-yolov4-training/src/gemm.h rename to det-yolov4-tmi/src/gemm.h diff --git a/det-yolov4-training/src/getopt.c b/det-yolov4-tmi/src/getopt.c similarity index 100% rename from det-yolov4-training/src/getopt.c rename to det-yolov4-tmi/src/getopt.c diff --git a/det-yolov4-training/src/getopt.h b/det-yolov4-tmi/src/getopt.h similarity index 100% rename from det-yolov4-training/src/getopt.h rename to det-yolov4-tmi/src/getopt.h diff --git a/det-yolov4-training/src/gettimeofday.c b/det-yolov4-tmi/src/gettimeofday.c similarity index 100% rename from det-yolov4-training/src/gettimeofday.c rename to det-yolov4-tmi/src/gettimeofday.c diff --git a/det-yolov4-training/src/gettimeofday.h b/det-yolov4-tmi/src/gettimeofday.h similarity index 100% rename from det-yolov4-training/src/gettimeofday.h rename to det-yolov4-tmi/src/gettimeofday.h diff --git a/det-yolov4-training/src/go.c b/det-yolov4-tmi/src/go.c similarity index 100% rename from det-yolov4-training/src/go.c rename to det-yolov4-tmi/src/go.c diff --git a/det-yolov4-training/src/gru_layer.c b/det-yolov4-tmi/src/gru_layer.c similarity index 100% rename from det-yolov4-training/src/gru_layer.c rename to det-yolov4-tmi/src/gru_layer.c diff --git a/det-yolov4-training/src/gru_layer.h b/det-yolov4-tmi/src/gru_layer.h similarity index 100% rename from det-yolov4-training/src/gru_layer.h rename to det-yolov4-tmi/src/gru_layer.h diff --git a/det-yolov4-training/src/http_stream.cpp b/det-yolov4-tmi/src/http_stream.cpp similarity index 100% rename from det-yolov4-training/src/http_stream.cpp rename to det-yolov4-tmi/src/http_stream.cpp diff --git a/det-yolov4-training/src/http_stream.h b/det-yolov4-tmi/src/http_stream.h similarity index 100% rename from det-yolov4-training/src/http_stream.h rename to det-yolov4-tmi/src/http_stream.h diff --git a/det-yolov4-training/src/httplib.h b/det-yolov4-tmi/src/httplib.h similarity index 100% rename from det-yolov4-training/src/httplib.h rename to det-yolov4-tmi/src/httplib.h diff --git a/det-yolov4-training/src/im2col.c b/det-yolov4-tmi/src/im2col.c similarity index 100% rename from det-yolov4-training/src/im2col.c rename to det-yolov4-tmi/src/im2col.c diff --git a/det-yolov4-training/src/im2col.h b/det-yolov4-tmi/src/im2col.h similarity index 100% rename from det-yolov4-training/src/im2col.h rename to det-yolov4-tmi/src/im2col.h diff --git a/det-yolov4-training/src/im2col_kernels.cu b/det-yolov4-tmi/src/im2col_kernels.cu similarity index 100% rename from det-yolov4-training/src/im2col_kernels.cu rename to det-yolov4-tmi/src/im2col_kernels.cu diff --git a/det-yolov4-training/src/image.c b/det-yolov4-tmi/src/image.c similarity index 100% rename from det-yolov4-training/src/image.c 
rename to det-yolov4-tmi/src/image.c diff --git a/det-yolov4-training/src/image.h b/det-yolov4-tmi/src/image.h similarity index 100% rename from det-yolov4-training/src/image.h rename to det-yolov4-tmi/src/image.h diff --git a/det-yolov4-training/src/image_opencv.cpp b/det-yolov4-tmi/src/image_opencv.cpp similarity index 100% rename from det-yolov4-training/src/image_opencv.cpp rename to det-yolov4-tmi/src/image_opencv.cpp diff --git a/det-yolov4-training/src/image_opencv.h b/det-yolov4-tmi/src/image_opencv.h similarity index 100% rename from det-yolov4-training/src/image_opencv.h rename to det-yolov4-tmi/src/image_opencv.h diff --git a/det-yolov4-training/src/layer.c b/det-yolov4-tmi/src/layer.c similarity index 100% rename from det-yolov4-training/src/layer.c rename to det-yolov4-tmi/src/layer.c diff --git a/det-yolov4-training/src/layer.h b/det-yolov4-tmi/src/layer.h similarity index 100% rename from det-yolov4-training/src/layer.h rename to det-yolov4-tmi/src/layer.h diff --git a/det-yolov4-training/src/list.c b/det-yolov4-tmi/src/list.c similarity index 100% rename from det-yolov4-training/src/list.c rename to det-yolov4-tmi/src/list.c diff --git a/det-yolov4-training/src/list.h b/det-yolov4-tmi/src/list.h similarity index 100% rename from det-yolov4-training/src/list.h rename to det-yolov4-tmi/src/list.h diff --git a/det-yolov4-training/src/local_layer.c b/det-yolov4-tmi/src/local_layer.c similarity index 100% rename from det-yolov4-training/src/local_layer.c rename to det-yolov4-tmi/src/local_layer.c diff --git a/det-yolov4-training/src/local_layer.h b/det-yolov4-tmi/src/local_layer.h similarity index 100% rename from det-yolov4-training/src/local_layer.h rename to det-yolov4-tmi/src/local_layer.h diff --git a/det-yolov4-training/src/lstm_layer.c b/det-yolov4-tmi/src/lstm_layer.c similarity index 100% rename from det-yolov4-training/src/lstm_layer.c rename to det-yolov4-tmi/src/lstm_layer.c diff --git a/det-yolov4-training/src/lstm_layer.h b/det-yolov4-tmi/src/lstm_layer.h similarity index 100% rename from det-yolov4-training/src/lstm_layer.h rename to det-yolov4-tmi/src/lstm_layer.h diff --git a/det-yolov4-training/src/matrix.c b/det-yolov4-tmi/src/matrix.c similarity index 100% rename from det-yolov4-training/src/matrix.c rename to det-yolov4-tmi/src/matrix.c diff --git a/det-yolov4-training/src/matrix.h b/det-yolov4-tmi/src/matrix.h similarity index 100% rename from det-yolov4-training/src/matrix.h rename to det-yolov4-tmi/src/matrix.h diff --git a/det-yolov4-training/src/maxpool_layer.c b/det-yolov4-tmi/src/maxpool_layer.c similarity index 100% rename from det-yolov4-training/src/maxpool_layer.c rename to det-yolov4-tmi/src/maxpool_layer.c diff --git a/det-yolov4-training/src/maxpool_layer.h b/det-yolov4-tmi/src/maxpool_layer.h similarity index 100% rename from det-yolov4-training/src/maxpool_layer.h rename to det-yolov4-tmi/src/maxpool_layer.h diff --git a/det-yolov4-training/src/maxpool_layer_kernels.cu b/det-yolov4-tmi/src/maxpool_layer_kernels.cu similarity index 100% rename from det-yolov4-training/src/maxpool_layer_kernels.cu rename to det-yolov4-tmi/src/maxpool_layer_kernels.cu diff --git a/det-yolov4-training/src/network.c b/det-yolov4-tmi/src/network.c similarity index 100% rename from det-yolov4-training/src/network.c rename to det-yolov4-tmi/src/network.c diff --git a/det-yolov4-training/src/network.h b/det-yolov4-tmi/src/network.h similarity index 100% rename from det-yolov4-training/src/network.h rename to det-yolov4-tmi/src/network.h diff --git 
a/det-yolov4-training/src/network_kernels.cu b/det-yolov4-tmi/src/network_kernels.cu similarity index 100% rename from det-yolov4-training/src/network_kernels.cu rename to det-yolov4-tmi/src/network_kernels.cu diff --git a/det-yolov4-training/src/nightmare.c b/det-yolov4-tmi/src/nightmare.c similarity index 100% rename from det-yolov4-training/src/nightmare.c rename to det-yolov4-tmi/src/nightmare.c diff --git a/det-yolov4-training/src/normalization_layer.c b/det-yolov4-tmi/src/normalization_layer.c similarity index 100% rename from det-yolov4-training/src/normalization_layer.c rename to det-yolov4-tmi/src/normalization_layer.c diff --git a/det-yolov4-training/src/normalization_layer.h b/det-yolov4-tmi/src/normalization_layer.h similarity index 100% rename from det-yolov4-training/src/normalization_layer.h rename to det-yolov4-tmi/src/normalization_layer.h diff --git a/det-yolov4-training/src/option_list.c b/det-yolov4-tmi/src/option_list.c similarity index 100% rename from det-yolov4-training/src/option_list.c rename to det-yolov4-tmi/src/option_list.c diff --git a/det-yolov4-training/src/option_list.h b/det-yolov4-tmi/src/option_list.h similarity index 100% rename from det-yolov4-training/src/option_list.h rename to det-yolov4-tmi/src/option_list.h diff --git a/det-yolov4-training/src/parser.c b/det-yolov4-tmi/src/parser.c similarity index 100% rename from det-yolov4-training/src/parser.c rename to det-yolov4-tmi/src/parser.c diff --git a/det-yolov4-training/src/parser.h b/det-yolov4-tmi/src/parser.h similarity index 100% rename from det-yolov4-training/src/parser.h rename to det-yolov4-tmi/src/parser.h diff --git a/det-yolov4-training/src/region_layer.c b/det-yolov4-tmi/src/region_layer.c similarity index 100% rename from det-yolov4-training/src/region_layer.c rename to det-yolov4-tmi/src/region_layer.c diff --git a/det-yolov4-training/src/region_layer.h b/det-yolov4-tmi/src/region_layer.h similarity index 100% rename from det-yolov4-training/src/region_layer.h rename to det-yolov4-tmi/src/region_layer.h diff --git a/det-yolov4-training/src/reorg_layer.c b/det-yolov4-tmi/src/reorg_layer.c similarity index 100% rename from det-yolov4-training/src/reorg_layer.c rename to det-yolov4-tmi/src/reorg_layer.c diff --git a/det-yolov4-training/src/reorg_layer.h b/det-yolov4-tmi/src/reorg_layer.h similarity index 100% rename from det-yolov4-training/src/reorg_layer.h rename to det-yolov4-tmi/src/reorg_layer.h diff --git a/det-yolov4-training/src/reorg_old_layer.c b/det-yolov4-tmi/src/reorg_old_layer.c similarity index 100% rename from det-yolov4-training/src/reorg_old_layer.c rename to det-yolov4-tmi/src/reorg_old_layer.c diff --git a/det-yolov4-training/src/reorg_old_layer.h b/det-yolov4-tmi/src/reorg_old_layer.h similarity index 100% rename from det-yolov4-training/src/reorg_old_layer.h rename to det-yolov4-tmi/src/reorg_old_layer.h diff --git a/det-yolov4-training/src/representation_layer.c b/det-yolov4-tmi/src/representation_layer.c similarity index 100% rename from det-yolov4-training/src/representation_layer.c rename to det-yolov4-tmi/src/representation_layer.c diff --git a/det-yolov4-training/src/representation_layer.h b/det-yolov4-tmi/src/representation_layer.h similarity index 100% rename from det-yolov4-training/src/representation_layer.h rename to det-yolov4-tmi/src/representation_layer.h diff --git a/det-yolov4-training/src/rnn.c b/det-yolov4-tmi/src/rnn.c similarity index 100% rename from det-yolov4-training/src/rnn.c rename to det-yolov4-tmi/src/rnn.c diff --git 
a/det-yolov4-training/src/rnn_layer.c b/det-yolov4-tmi/src/rnn_layer.c similarity index 100% rename from det-yolov4-training/src/rnn_layer.c rename to det-yolov4-tmi/src/rnn_layer.c diff --git a/det-yolov4-training/src/rnn_layer.h b/det-yolov4-tmi/src/rnn_layer.h similarity index 100% rename from det-yolov4-training/src/rnn_layer.h rename to det-yolov4-tmi/src/rnn_layer.h diff --git a/det-yolov4-training/src/rnn_vid.c b/det-yolov4-tmi/src/rnn_vid.c similarity index 100% rename from det-yolov4-training/src/rnn_vid.c rename to det-yolov4-tmi/src/rnn_vid.c diff --git a/det-yolov4-training/src/route_layer.c b/det-yolov4-tmi/src/route_layer.c similarity index 100% rename from det-yolov4-training/src/route_layer.c rename to det-yolov4-tmi/src/route_layer.c diff --git a/det-yolov4-training/src/route_layer.h b/det-yolov4-tmi/src/route_layer.h similarity index 100% rename from det-yolov4-training/src/route_layer.h rename to det-yolov4-tmi/src/route_layer.h diff --git a/det-yolov4-training/src/sam_layer.c b/det-yolov4-tmi/src/sam_layer.c similarity index 100% rename from det-yolov4-training/src/sam_layer.c rename to det-yolov4-tmi/src/sam_layer.c diff --git a/det-yolov4-training/src/sam_layer.h b/det-yolov4-tmi/src/sam_layer.h similarity index 100% rename from det-yolov4-training/src/sam_layer.h rename to det-yolov4-tmi/src/sam_layer.h diff --git a/det-yolov4-training/src/scale_channels_layer.c b/det-yolov4-tmi/src/scale_channels_layer.c similarity index 100% rename from det-yolov4-training/src/scale_channels_layer.c rename to det-yolov4-tmi/src/scale_channels_layer.c diff --git a/det-yolov4-training/src/scale_channels_layer.h b/det-yolov4-tmi/src/scale_channels_layer.h similarity index 100% rename from det-yolov4-training/src/scale_channels_layer.h rename to det-yolov4-tmi/src/scale_channels_layer.h diff --git a/det-yolov4-training/src/shortcut_layer.c b/det-yolov4-tmi/src/shortcut_layer.c similarity index 100% rename from det-yolov4-training/src/shortcut_layer.c rename to det-yolov4-tmi/src/shortcut_layer.c diff --git a/det-yolov4-training/src/shortcut_layer.h b/det-yolov4-tmi/src/shortcut_layer.h similarity index 100% rename from det-yolov4-training/src/shortcut_layer.h rename to det-yolov4-tmi/src/shortcut_layer.h diff --git a/det-yolov4-training/src/softmax_layer.c b/det-yolov4-tmi/src/softmax_layer.c similarity index 100% rename from det-yolov4-training/src/softmax_layer.c rename to det-yolov4-tmi/src/softmax_layer.c diff --git a/det-yolov4-training/src/softmax_layer.h b/det-yolov4-tmi/src/softmax_layer.h similarity index 100% rename from det-yolov4-training/src/softmax_layer.h rename to det-yolov4-tmi/src/softmax_layer.h diff --git a/det-yolov4-training/src/super.c b/det-yolov4-tmi/src/super.c similarity index 100% rename from det-yolov4-training/src/super.c rename to det-yolov4-tmi/src/super.c diff --git a/det-yolov4-training/src/swag.c b/det-yolov4-tmi/src/swag.c similarity index 100% rename from det-yolov4-training/src/swag.c rename to det-yolov4-tmi/src/swag.c diff --git a/det-yolov4-training/src/tag.c b/det-yolov4-tmi/src/tag.c similarity index 100% rename from det-yolov4-training/src/tag.c rename to det-yolov4-tmi/src/tag.c diff --git a/det-yolov4-training/src/tree.c b/det-yolov4-tmi/src/tree.c similarity index 100% rename from det-yolov4-training/src/tree.c rename to det-yolov4-tmi/src/tree.c diff --git a/det-yolov4-training/src/tree.h b/det-yolov4-tmi/src/tree.h similarity index 100% rename from det-yolov4-training/src/tree.h rename to det-yolov4-tmi/src/tree.h diff --git 
a/det-yolov4-training/src/upsample_layer.c b/det-yolov4-tmi/src/upsample_layer.c similarity index 100% rename from det-yolov4-training/src/upsample_layer.c rename to det-yolov4-tmi/src/upsample_layer.c diff --git a/det-yolov4-training/src/upsample_layer.h b/det-yolov4-tmi/src/upsample_layer.h similarity index 100% rename from det-yolov4-training/src/upsample_layer.h rename to det-yolov4-tmi/src/upsample_layer.h diff --git a/det-yolov4-training/src/utils.c b/det-yolov4-tmi/src/utils.c similarity index 100% rename from det-yolov4-training/src/utils.c rename to det-yolov4-tmi/src/utils.c diff --git a/det-yolov4-training/src/utils.h b/det-yolov4-tmi/src/utils.h similarity index 100% rename from det-yolov4-training/src/utils.h rename to det-yolov4-tmi/src/utils.h diff --git a/det-yolov4-training/src/version.h b/det-yolov4-tmi/src/version.h similarity index 100% rename from det-yolov4-training/src/version.h rename to det-yolov4-tmi/src/version.h diff --git a/det-yolov4-training/src/version.h.in b/det-yolov4-tmi/src/version.h.in similarity index 100% rename from det-yolov4-training/src/version.h.in rename to det-yolov4-tmi/src/version.h.in diff --git a/det-yolov4-training/src/voxel.c b/det-yolov4-tmi/src/voxel.c similarity index 100% rename from det-yolov4-training/src/voxel.c rename to det-yolov4-tmi/src/voxel.c diff --git a/det-yolov4-training/src/writing.c b/det-yolov4-tmi/src/writing.c similarity index 100% rename from det-yolov4-training/src/writing.c rename to det-yolov4-tmi/src/writing.c diff --git a/det-yolov4-training/src/yolo.c b/det-yolov4-tmi/src/yolo.c similarity index 100% rename from det-yolov4-training/src/yolo.c rename to det-yolov4-tmi/src/yolo.c diff --git a/det-yolov4-training/src/yolo_console_dll.cpp b/det-yolov4-tmi/src/yolo_console_dll.cpp similarity index 100% rename from det-yolov4-training/src/yolo_console_dll.cpp rename to det-yolov4-tmi/src/yolo_console_dll.cpp diff --git a/det-yolov4-training/src/yolo_layer.c b/det-yolov4-tmi/src/yolo_layer.c similarity index 100% rename from det-yolov4-training/src/yolo_layer.c rename to det-yolov4-tmi/src/yolo_layer.c diff --git a/det-yolov4-training/src/yolo_layer.h b/det-yolov4-tmi/src/yolo_layer.h similarity index 100% rename from det-yolov4-training/src/yolo_layer.h rename to det-yolov4-tmi/src/yolo_layer.h diff --git a/det-yolov4-training/src/yolo_v2_class.cpp b/det-yolov4-tmi/src/yolo_v2_class.cpp similarity index 100% rename from det-yolov4-training/src/yolo_v2_class.cpp rename to det-yolov4-tmi/src/yolo_v2_class.cpp diff --git a/det-yolov4-tmi/start.py b/det-yolov4-tmi/start.py new file mode 100644 index 0000000..67da850 --- /dev/null +++ b/det-yolov4-tmi/start.py @@ -0,0 +1,24 @@ +import logging +import subprocess +import sys + +import yaml + + +def start() -> int: + with open("/in/env.yaml", "r", encoding='utf8') as f: + config = yaml.safe_load(f) + + logging.info(f"config is {config}") + if config['run_training']: + cmd = 'bash /darknet/make_train_test_darknet.sh' + cwd = '/darknet' + else: + cmd = 'python3 docker_main.py' + cwd = '/darknet/mining' + subprocess.run(cmd, check=True, shell=True, cwd=cwd) + + return 0 + +if __name__ == '__main__': + sys.exit(start()) diff --git a/det-yolov4-training/train.sh b/det-yolov4-tmi/train.sh similarity index 100% rename from det-yolov4-training/train.sh rename to det-yolov4-tmi/train.sh diff --git a/det-yolov4-training/train_watcher.py b/det-yolov4-tmi/train_watcher.py similarity index 100% rename from det-yolov4-training/train_watcher.py rename to det-yolov4-tmi/train_watcher.py 
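Note: the new `det-yolov4-tmi/start.py` added above is the container's single entry point — it reads `/in/env.yaml` (written by the ymir platform) and dispatches either to the darknet training script or to the mining/infer `docker_main.py`. The sketch below illustrates that dispatch contract; the `run_training` key and the darknet commands come from the patch itself, while the sample document (reusing the task id quoted later in this series) and the `simulate_start` helper are illustrative assumptions, not part of this patch.

```python
# Minimal sketch of the /in/env.yaml dispatch used by start.py above.
# Assumption: env.yaml carries boolean task flags per the ymir-exc protocol;
# the sample values and helper name are for demonstration only.
import yaml

SAMPLE_ENV = """
task_id: t000000100000175245d1656933456
run_training: true
run_mining: false
run_infer: false
"""


def simulate_start(env_text: str) -> str:
    config = yaml.safe_load(env_text)
    # start.py branches on the truthiness of run_training: training jobs
    # run the darknet shell script from /darknet, everything else falls
    # through to the mining/infer entry point in /darknet/mining.
    if config['run_training']:
        return 'bash /darknet/make_train_test_darknet.sh'
    return 'python3 docker_main.py'


print(simulate_start(SAMPLE_ENV))  # -> bash /darknet/make_train_test_darknet.sh
```

A false `run_training` flag silently selects the mining/infer path; PATCH 092 below tightens this by reading the `run_mining` and `run_infer` flags explicitly from the same file in `docker_main.py`.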
diff --git a/det-yolov4-training/train_yolov3.sh b/det-yolov4-tmi/train_yolov3.sh similarity index 100% rename from det-yolov4-training/train_yolov3.sh rename to det-yolov4-tmi/train_yolov3.sh diff --git a/det-yolov4-training/training-template.yaml b/det-yolov4-tmi/training-template.yaml similarity index 96% rename from det-yolov4-training/training-template.yaml rename to det-yolov4-tmi/training-template.yaml index 17c32f7..5e75eaf 100644 --- a/det-yolov4-training/training-template.yaml +++ b/det-yolov4-tmi/training-template.yaml @@ -4,7 +4,7 @@ image_width: 608 learning_rate: 0.0013 max_batches: 20000 warmup_iterations: 1000 -batch: 64 +batch: 4 subdivisions: 32 shm_size: '16G' # class_names: diff --git a/det-yolov4-training/video_yolov3.sh b/det-yolov4-tmi/video_yolov3.sh similarity index 100% rename from det-yolov4-training/video_yolov3.sh rename to det-yolov4-tmi/video_yolov3.sh diff --git a/det-yolov4-training/video_yolov4.sh b/det-yolov4-tmi/video_yolov4.sh similarity index 100% rename from det-yolov4-training/video_yolov4.sh rename to det-yolov4-tmi/video_yolov4.sh diff --git a/det-yolov4-training/warm_up_training.py b/det-yolov4-tmi/warm_up_training.py similarity index 100% rename from det-yolov4-training/warm_up_training.py rename to det-yolov4-tmi/warm_up_training.py From 88a66e802789acc144eae7a4bb7a2c2ab27fe5df Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 3 Aug 2022 13:51:15 +0800 Subject: [PATCH 092/204] update readme --- README.MD | 149 +++----------- README_zh-CN.MD | 203 +++++++++++++++++++ det-mmdetection-tmi/mmdet/utils/util_ymir.py | 2 +- det-yolov4-tmi/cuda101.dockerfile | 1 + det-yolov4-tmi/mining/docker_main.py | 11 +- det-yolov5-tmi/utils/ymir_yolov5.py | 31 +-- 6 files changed, 258 insertions(+), 139 deletions(-) create mode 100644 README_zh-CN.MD diff --git a/README.MD b/README.MD index b03b375..085f419 100644 --- a/README.MD +++ b/README.MD @@ -1,4 +1,4 @@ -# ymir-executor 使用文档 +# ymir-executor documentation [English](./README.MD) | [简体中文](./README_zh-CN.MD) - [ymir](https://github.com/IndustryEssentials/ymir) @@ -7,28 +7,32 @@ - [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training) ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu111-tmi + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi + + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi ``` - [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi + + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi ``` - [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi + + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi ``` - [detectron2](https://github.com/yzbx/ymir-detectron2) - [change log](https://github.com/yzbx/ymir-detectron2/blob/master/README.md) - - ymir1.0.0的镜像与ymir1.1.0兼容 - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.0.0-detectron2-tmi + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi ``` - [yolov7](https://github.com/yzbx/ymir-yolov7) @@ -39,42 +43,39 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi ``` -## det-yolov4-training +## det-yolov4-tmi -- yolov4的训练镜像,采用mxnet与darknet框架,默认的 `Dockerfile` cuda版本为`10.1`,无法在高版本显卡如GTX3080/GTX3090上运行,需要修改dockerfile将cuda版本提升为11.1以上,参考 `cuda112.dockerfile` 进行构建。 +- yolov4 training, mining and infer docker image, use `mxnet` and `darknet` framework ``` - cd 
det-yolov4-training - # cuda101-yolov4-training - docker build -t ymir-executor/yolov4:cuda101-training -f Dockerfile . + cd det-yolov4-tmi + docker build -t ymir-executor/yolov4:cuda101-tmi -f cuda101.dockerfile . - # cuda112-yolov4-training - docker build -t ymir-executor/yolov4:cuda112-training -f cuda112.dockerfile . + docker build -t ymir-executor/yolov4:cuda112-tmi -f cuda112.dockerfile . ``` -## det-yolov4-mining - -- yolov4挖掘与推理镜像,与det-yolov4-training对应 +## det-yolov5-tmi -``` -cd det-yolov4-mining +- [change log](./det-yolov5-tmi/README.md) -docker build -t ymir-executor/yolov4:cuda101-mi -f Dockerfile . +- yolov5 training, mining and infer docker image, use `pytorch` framework -docker build -t ymir-executor/yolov4:cuda112-mi -f cuda112.dockerfile . ``` +cd det-yolov5-tmi +docker build -t ymir-executor/yolov5:cuda102-tmi -f cuda102.dockerfile . -## det-yolov5-tmi +docker build -t ymir-executor/yolov5:cuda111-tmi -f cuda111.dockerfile . +``` -- [change log](./det-yolov5-tmi/README.md) +## det-mmdetection-tmi -- yolov5训练、挖掘及推理镜像,镜像构建时会从github上下载权重, 如果访问github不稳定, 建议提前将模型权重下载并在构建时复制到镜像中. +- [change log](./det-mmdetection-tmi/README.md) ``` -cd det-yolov5-tmi -docker build -t ymir-executor/ymir1.1.0:cuda102-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda102.dockerfile . +cd det-mmdetection-tmi +docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 . -docker build -t ymir-executor/ymir1.1.0:cuda111-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda111.dockerfile . +docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . ``` ## live-code-executor @@ -91,100 +92,12 @@ docker build -t ymir-executor/live-code:torch-tmi -f torch.dockerfile docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ``` -## det-mmdetection-tmi - -- [change log](./det-mmdetection-tmi/README.md) - -``` -cd det-mmdetection-tmi -docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi -f docker/Dockerfile.cuda102 --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 . - -docker build -t youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi -f docker/Dockerfile.cuda111 --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 . -``` - -## 如何制作自己的ymir-executor - -- [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) - -- [ymir-executor-sdk](https://github.com/yzbx/ymir-executor-sdk) ymir镜像开发辅助库 - -## 如何导入预训练模型 - -- [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md) - - - 通过ymir网页端的 `模型管理/模型列表/导入模型` 同样可以导入模型 - ---- - -# FAQ - -## apt 或 pip 安装慢或出错 - - - 采用国内源,如在docker file 中添加如下命令 - - ``` - RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list - - RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple - ``` - -## docker build 的时候出错,找不到相应docker file或`COPY/ADD`时出错 - - - 回到项目根目录或docker file对应根目录,确保docker file 中`COPY/ADD`的文件与文件夹能够访问,以yolov5为例. - - ``` - cd ymir-executor/det-yolov5-tmi - - docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile --build-arg SERVER_MODE=dev - ``` - -## 镜像运行完`/in`与`/out`目录中的文件被清理 - - - ymir系统为节省空间,会在任务`成功结束`后删除其中不必要的文件,如果不想删除,可以在部署ymir时,修改文件`ymir/command/mir/tools/command_run_in_out.py`,注释其中的`_cleanup(work_dir=work_dir)`。注意需要重新构建后端镜像 - - ``` - cd ymir - docker build -t industryessentials/ymir-backend --build-arg PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple --build-arg SERVER_MODE='dev' -f Dockerfile.backend . 
- - docker-compose down -v && docker-compose up -d - ``` - -## 训练镜像如何调试 - - ![](./debug.png) - - - 先通过失败任务的tensorboard链接拿到任务id,如`t000000100000175245d1656933456` - - - 进入ymir部署目录 `ymir-workplace/sandbox/work_dir/TaskTypeTraining/t000000100000175245d1656933456/sub_task/t000000100000175245d1656933456`, `ls` 可以看到以下结果 - - ``` - # ls - in out task_config.yaml - ``` - - - 挂载目录并运行镜像``,注意需要将ymir部署目录挂载到镜像中 - - ``` - docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v : bash - - # 以/home/ymir/ymir-workplace作为ymir部署目录为例 - docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace bash - ``` - - - 进入到docker 容器中后, 执行镜像默认的命令, 如dockerfile中写的 `CMD bash /usr/bin/start.sh` - - ``` - bash /usr/bin/start.sh - ``` - - - 推理与挖掘镜像调试同理,注意对应目录均为`ymir-workplace/sandbox/work_dir/TaskTypeMining` - -## 模型精度/速度如何权衡与提升 +## how to customize ymir-executor - - 模型精度与数据集大小、数据集质量、学习率、batch size、 迭代次数、模型结构、数据增强方式、损失函数等相关,在此不做展开,详情参考: +- [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) - - [Object Detection in 20 Years: A Survey](https://arxiv.org/abs/1905.05055) +- [ymir-executor-sdk](https://github.com/yzbx/ymir-executor-sdk) - - [Paper with Code: Object Detection](https://paperswithcode.com/task/object-detection) +## how to import pretrained model weights - - [awesome object detection](https://github.com/amusi/awesome-object-detection) +- [import pretrained model weights](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md) diff --git a/README_zh-CN.MD b/README_zh-CN.MD new file mode 100644 index 0000000..d05ffab --- /dev/null +++ b/README_zh-CN.MD @@ -0,0 +1,203 @@ +# ymir-executor 使用文档 [English](./README.MD) | [简体中文](./README_zh-CN.MD) + +- [ymir](https://github.com/IndustryEssentials/ymir) + +## ymir-1.1.0 官方镜像 + +- [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training) + + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi + + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi + ``` + +- [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi) + + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi + + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi + ``` + +- [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi) + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi + + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi + ``` + +- [detectron2](https://github.com/yzbx/ymir-detectron2) + + - [change log](https://github.com/yzbx/ymir-detectron2/blob/master/README.md) + + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi + ``` + +- [yolov7](https://github.com/yzbx/ymir-yolov7) + + - [change log](https://github.com/yzbx/ymir-yolov7/blob/main/ymir/README.md) + + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi + ``` + +## det-yolov4-tmi + +- yolov4的训练、挖掘与推理镜像,采用mxnet与darknet框架 + + ``` + cd det-yolov4-tmi + docker build -t ymir-executor/yolov4:cuda101-tmi -f cuda101.dockerfile . + + docker build -t ymir-executor/yolov4:cuda112-tmi -f cuda112.dockerfile . + ``` + +## det-yolov5-tmi + +- [change log](./det-yolov5-tmi/README.md) + +- yolov5训练、挖掘及推理镜像,采用pytorch框架,镜像构建时会从github上下载权重, 如果访问github不稳定, 建议提前将模型权重下载并在构建时复制到镜像中. + +``` +cd det-yolov5-tmi +docker build -t ymir-executor/yolov5:cuda102-tmi -f cuda102.dockerfile .
+ +docker build -t ymir-executor/yolov5:cuda111-tmi -f cuda111.dockerfile . +``` + +## det-mmdetection-tmi + +- [change log](./det-mmdetection-tmi/README.md) + +``` +cd det-mmdetection-tmi +docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 . + +docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . +``` + +## live-code-executor + +- 可以通过`git_url`, `commit id` 或 `tag` 从网上clone代码到镜像并运行, 不推荐使用`branch`, 因为这样拉取的代码可能随时间变化, 实验结果不具备可重复性. + +- 参考 [live-code](https://github.com/IndustryEssentials/ymir-remote-git) + +``` +cd live-code-executor + +docker build -t ymir-executor/live-code:torch-tmi -f torch.dockerfile + +docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile +``` + +## 如何制作自己的ymir-executor + +- [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) + +- [ymir-executor-sdk](https://github.com/yzbx/ymir-executor-sdk) ymir镜像开发辅助库 + +## 如何导入预训练模型 + +- [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md) + + - 通过ymir网页端的 `模型管理/模型列表/导入模型` 同样可以导入模型 + +--- + +# FAQ + +## 关于cuda版本 + +- 推荐安装11.2以上的cuda版本, 使用11.1及以上的镜像 + +- GTX3080/GTX3090系统不支持11.1以下的cuda,只能使用cuda11.1及以上的镜像 + +## apt 或 pip 安装慢或出错 + +- 采用国内源,如在docker file 中添加如下命令 + + ``` + RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list + + RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple + ``` + +## docker build 的时候出错,找不到相应docker file或`COPY/ADD`时出错 + +- 回到项目根目录或docker file对应根目录,确保docker file 中`COPY/ADD`的文件与文件夹能够访问,以yolov5为例. + + ``` + cd ymir-executor/det-yolov5-tmi + + docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile --build-arg SERVER_MODE=dev + ``` + +## 镜像运行完`/in`与`/out`目录中的文件被清理 + +- ymir系统为节省空间,会在任务`成功结束`后删除其中不必要的文件,如果不想删除,可以在部署ymir后,修改镜像`industryessentials/ymir-backend`中的`/usr/local/lib/python3.8/dist-packages/mir/tools/command_run_in_out.py`,注释其中所有的`_cleanup(work_dir=work_dir)`, 将修改覆盖到镜像`industryessentials/ymir-backend:latest`并重启ymir + + ``` + $ docker ps |grep backend + + 580c2f1dae1b industryessentials/ymir-backend ... + 5490c294982f industryessentials/ymir-backend-redis ... + + $ docker run -it --rm industryessentials/ymir-backend:latest bash + $ vim /usr/local/lib/python3.8/dist-packages/mir/tools/command_run_in_out.py + ``` + 注释所有的`_cleanup(work_dir=work_dir)`之后,不要立即退出容器,切换到另一个终端 + ``` + $ docker ps |grep backend + + dced73e51429 industryessentials/ymir-backend # use the latest one + 580c2f1dae1b industryessentials/ymir-backend ... + 5490c294982f industryessentials/ymir-backend-redis ... 
+ + $ docker commit dced73e51429 industryessentials/ymir-backend:latest + ``` + 保存改动后,再切换回之前的终端,退出容器,重启ymir即可 + + +## 训练镜像如何调试 + +![](./debug.png) + +- 先修改镜像`industryessentials/ymir-backend`,注释其中所有的`_cleanup(work_dir=work_dir)`,保存`/in`和`/out`目录 + +- 再通过失败任务的tensorboard链接拿到任务id,如`t000000100000175245d1656933456` + +- 进入ymir部署目录 `ymir-workplace/sandbox/work_dir/TaskTypeTraining/t000000100000175245d1656933456/sub_task/t000000100000175245d1656933456`, `ls` 可以看到以下结果 + + ``` + # ls + in out task_config.yaml + ``` + +- 挂载目录并运行镜像``,注意需要将ymir部署目录挂载到镜像中 + + ``` + docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v : bash + + # 以/home/ymir/ymir-workplace作为ymir部署目录为例 + docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace bash + ``` + +- 进入到docker 容器中后, 执行镜像默认的命令, 如dockerfile中写的 `CMD bash /usr/bin/start.sh` + + ``` + bash /usr/bin/start.sh + ``` + +- 推理与挖掘镜像调试同理,注意对应目录均为`ymir-workplace/sandbox/work_dir/TaskTypeMining` + +## 模型精度/速度如何权衡与提升 + +- 模型精度与数据集大小、数据集质量、学习率、batch size、 迭代次数、模型结构、数据增强方式、损失函数等相关,在此不做展开,详情参考: + + - [Object Detection in 20 Years: A Survey](https://arxiv.org/abs/1905.05055) + + - [Paper with Code: Object Detection](https://paperswithcode.com/task/object-detection) + + - [awesome object detection](https://github.com/amusi/awesome-object-detection) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index aac1df8..810914b 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -3,13 +3,13 @@ """ import glob import logging -import yaml import os import os.path as osp from enum import IntEnum from typing import Any, List, Optional import mmcv +import yaml from easydict import EasyDict as edict from mmcv import Config from nptyping import NDArray, Shape, UInt8 diff --git a/det-yolov4-tmi/cuda101.dockerfile b/det-yolov4-tmi/cuda101.dockerfile index 5a5a2b5..53aa01b 100644 --- a/det-yolov4-tmi/cuda101.dockerfile +++ b/det-yolov4-tmi/cuda101.dockerfile @@ -16,6 +16,7 @@ RUN rm /usr/bin/python3 RUN ln -s /usr/bin/python3.7 /usr/bin/python3 RUN python3 get-pip.py RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six scipy tqdm + ENV DEBIAN_FRONTEND noninteractive RUN apt-get update && apt-get install -y libopencv-dev COPY . 
/darknet diff --git a/det-yolov4-tmi/mining/docker_main.py b/det-yolov4-tmi/mining/docker_main.py index 5f65377..359d066 100644 --- a/det-yolov4-tmi/mining/docker_main.py +++ b/det-yolov4-tmi/mining/docker_main.py @@ -36,15 +36,16 @@ def _load_config(config_file) -> dict: if __name__ == '__main__': config = _load_config("/in/config.yaml") - env_config = _load_config("/in/env.yaml") - run_infer = env_config['run_infer']=='true' - run_mining = env_config['run_mining']=='true' + with open("/in/env.yaml", "r", encoding='utf8') as f: + env_config = yaml.safe_load(f) + run_infer = int(env_config['run_infer']) + run_mining = int(env_config['run_mining']) if not run_infer and not run_mining: raise ValueError('both run_infer and run_mining set to 0, abort') - monitor_process.run_mining = int(run_mining) - monitor_process.run_infer = int(run_infer) + monitor_process.run_mining = run_mining + monitor_process.run_infer = run_infer log_writer = LogWriter(monitor_path="/out/monitor.txt", monitor_pure_path="/out/monitor-log.txt", diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index be78660..c2bd681 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -8,20 +8,19 @@ from enum import IntEnum from typing import Any, Dict, List, Tuple -from easydict import EasyDict as edict import numpy as np import torch import yaml -from nptyping import NDArray, Shape, UInt8 -from packaging.version import Version -from ymir_exc import env -from ymir_exc import result_writer as rw - +from easydict import EasyDict as edict from models.common import DetectMultiBackend from models.experimental import attempt_download +from nptyping import NDArray, Shape, UInt8 +from packaging.version import Version from utils.augmentations import letterbox from utils.general import check_img_size, non_max_suppression, scale_coords from utils.torch_utils import select_device +from ymir_exc import env +from ymir_exc import result_writer as rw class YmirStage(IntEnum): @@ -85,17 +84,16 @@ def get_weight_file(cfg: edict) -> str: else: model_params_path = cfg.param.model_params_path - model_dir = osp.join(cfg.ymir.input.root_dir, - cfg.ymir.input.models_dir) - model_params_path = [p for p in model_params_path if osp.exists(osp.join(model_dir, p))] + model_dir = cfg.ymir.input.models_dir + model_params_path = [osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pt')] # choose weight file by priority, best.pt > xxx.pt - if 'best.pt' in model_params_path: - return osp.join(model_dir, 'best.pt') - else: - for f in model_params_path: - if f.endswith('.pt'): - return osp.join(model_dir, f) + for p in model_params_path: + if p.endswith('best.pt'): + return p + + if len(model_params_path) > 0: + return max(model_params_path, key=osp.getctime) return "" @@ -142,6 +140,9 @@ def __init__(self, cfg: edict): def init_detector(self, device: torch.device) -> DetectMultiBackend: weights = get_weight_file(self.cfg) + if not weights: + raise Exception("no weights file specified!") + data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') model = DetectMultiBackend(weights=weights, device=device, From 769be08f9c8f50ffb66dd7dfee0a0aba8b5c9f8b Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 3 Aug 2022 17:40:29 +0800 Subject: [PATCH 093/204] update readme and dockerfile --- README.MD | 14 ---------- README_zh-CN.MD | 17 +++++++++-- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 9 ++---- 
det-mmdetection-tmi/docker/Dockerfile.cuda111 | 9 ++---- det-yolov5-tmi/cuda102.dockerfile | 11 ++------ det-yolov5-tmi/cuda111.dockerfile | 11 ++------ det-yolov5-tmi/utils/ymir_yolov5.py | 28 +++++++++---------- live-code-executor/mxnet.dockerfile | 2 +- live-code-executor/torch.dockerfile | 1 + 9 files changed, 41 insertions(+), 61 deletions(-) diff --git a/README.MD index 085f419..54a8b94 100644 --- a/README.MD +++ b/README.MD @@ -78,20 +78,6 @@ docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 . docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . ``` -## live-code-executor - -- 可以通过`git_url`, `commit id` 或 `tag` 从网上clone代码到镜像并运行, 不推荐使用`branch`, 因为这样拉取的代码可能随时间变化, 实验结果不具备可重复性. - -- 参考 [live-code](https://github.com/IndustryEssentials/ymir-remote-git) - -``` -cd live-code-executor - -docker build -t ymir-executor/live-code:torch-tmi -f torch.dockerfile - -docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile -``` - ## how to customize ymir-executor - [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) diff --git a/README_zh-CN.MD index d05ffab..5505b24 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -131,7 +131,7 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ``` cd ymir-executor/det-yolov5-tmi - docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile --build-arg SERVER_MODE=dev + docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile ``` ## 镜像运行完`/in`与`/out`目录中的文件被清理 @@ -162,9 +162,16 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 训练镜像如何调试 +- 一般性的错误在`ymir-workplace/ymir-data/logs`下查看 + +``` +tail -f -n 100 ymir_controller.log +tail -f -n 100 ymir_app.log +``` + ![](./debug.png) -- 先修改镜像`industryessentials/ymir-backend`,注释其中所有的`_cleanup(work_dir=work_dir)`,保存`/in`和`/out`目录 +- 先修改镜像`industryessentials/ymir-backend`,注释其中所有的`_cleanup(work_dir=work_dir)`,保存`/in`和`/out`目录下的文件 - 再通过失败任务的tensorboard链接拿到任务id,如`t000000100000175245d1656933456` @@ -173,6 +180,12 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ``` # ls in out task_config.yaml + + # ls out + monitor.txt ymir-executor-out.log + + # ls in + assets config.yaml env.yaml ...
``` - 挂载目录并运行镜像``,注意需要将ymir部署目录挂载到镜像中 diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index dd73fb5..517acd0 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -6,7 +6,6 @@ FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel # mmcv>=1.3.17, <=1.5.0 ARG MMCV="1.4.3" -ARG SERVER_MODE=prod ARG YMIR="1.1.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" @@ -27,13 +26,9 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ && rm -rf /var/lib/apt/lists/* # Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) -RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ - if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ - else \ - pip install ymir-exc; \ - fi \ +RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html \ + && pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" \ && conda clean --all # Install det-mmdetection-tmi diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index e4320d4..fbf2508 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -6,7 +6,6 @@ FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime # mmcv>=1.3.17, <=1.5.0 ARG MMCV="1.4.3" -ARG SERVER_MODE=prod ARG YMIR="1.1.0" ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" @@ -25,13 +24,9 @@ RUN apt-get update && apt-get install -y build-essential ffmpeg libsm6 libxext6 && rm -rf /var/lib/apt/lists/* # Install ymir-exc sdk and MMCV -RUN pip install --no-cache-dir --upgrade pip wheel setuptools && \ - if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ - else \ - pip install ymir-exc; \ - fi \ +RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.8.0/index.html \ + && pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" \ && conda clean --all # Install det-mmdetection-tmi diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile index 031859d..e8ab497 100644 --- a/det-yolov5-tmi/cuda102.dockerfile +++ b/det-yolov5-tmi/cuda102.dockerfile @@ -3,8 +3,6 @@ ARG CUDA="10.2" ARG CUDNN="7" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime -# support SERVER_MODE=dev or prod -ARG SERVER_MODE=prod # support YMIR=1.0.0, 1.1.0 or 1.2.0 ARG YMIR="1.1.0" @@ -16,16 +14,13 @@ ENV YMIR_VERSION=${YMIR} # Install linux package RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ - libgl1-mesa-glx curl wget zip \ + libgl1-mesa-glx libsm6 libxext6 libxrender-dev curl wget zip vim \ + build-essential ninja-build \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* # install ymir-exc sdk -RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ - else \ - pip install ymir-exc; \ - fi +RUN pip install 
"git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" # Copy file from host to docker and install requirements COPY . /app diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile index c238bd5..6cfff64 100644 --- a/det-yolov5-tmi/cuda111.dockerfile +++ b/det-yolov5-tmi/cuda111.dockerfile @@ -4,8 +4,6 @@ ARG CUDNN="8" # cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!! FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime -# support SERVER_MODE=dev or prod -ARG SERVER_MODE=prod # support YMIR=1.0.0, 1.1.0 or 1.2.0 ARG YMIR="1.1.0" @@ -18,16 +16,13 @@ ENV YMIR_VERSION=$YMIR # Install linux package RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ - libgl1-mesa-glx curl wget zip \ + libgl1-mesa-glx libsm6 libxext6 libxrender-dev curl wget zip vim \ + build-essential ninja-build \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* # install ymir-exc sdk -RUN if [ "${SERVER_MODE}" = "dev" ]; then \ - pip install "git+https://github.com/IndustryEssentials/ymir.git/@dev#egg=ymir-exc&subdirectory=docker_executor/sample_executor/ymir_exc"; \ - else \ - pip install ymir-exc; \ - fi +RUN pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" # Copy file from host to docker and install requirements COPY . /app diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index c2bd681..fec095b 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -85,7 +85,8 @@ def get_weight_file(cfg: edict) -> str: model_params_path = cfg.param.model_params_path model_dir = cfg.ymir.input.models_dir - model_params_path = [osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pt')] + model_params_path = [osp.join(model_dir, p) + for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pt')] # choose weight file by priority, best.pt > xxx.pt for p in model_params_path: @@ -233,15 +234,15 @@ def write_ymir_training_result(cfg: edict, weight_file: str = "") -> int: YMIR_VERSION = os.getenv('YMIR_VERSION', '1.2.0') if Version(YMIR_VERSION) >= Version('1.2.0'): - _write_latest_ymir_training_result(cfg, map50, epoch, weight_file) + _write_latest_ymir_training_result(cfg, float(map50), epoch, weight_file) else: - _write_ancient_ymir_training_result(cfg, map50) + _write_ancient_ymir_training_result(cfg, float(map50)) def _write_latest_ymir_training_result(cfg: edict, - map50: float, - epoch: int, - weight_file: str) -> int: + map50: float, + epoch: int, + weight_file: str) -> int: """ for ymir>=1.2.0 cfg: ymir config @@ -266,10 +267,10 @@ def _write_latest_ymir_training_result(cfg: edict, training_result_file = cfg.ymir.output.training_result_file if osp.exists(training_result_file): - with open(cfg.ymir.output.training_result_file, 'r') as f: + with open(training_result_file, 'r') as f: training_result = yaml.safe_load(stream=f) - map50 = max(training_result.get('map',0.0), map50) + map50 = max(training_result.get('map', 0.0), map50) rw.write_model_stage(stage_name=f"{model}_last_and_best", files=files, mAP=float(map50)) @@ -284,18 +285,17 @@ def _write_ancient_ymir_training_result(cfg: edict, map50: float) -> None: files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))] training_result_file = cfg.ymir.output.training_result_file if osp.exists(training_result_file): - with open(cfg.ymir.output.training_result_file, 'r') as f: + with open(training_result_file, 'r') as f: 
training_result = yaml.safe_load(stream=f) training_result['model'] = files - training_result['map'] = max(training_result.get('map', 0), map50) + training_result['map'] = max(float(training_result.get('map', 0)), map50) else: training_result = { 'model': files, - 'map': map50, - 'stage_name': f'{cfg.param.model}' + 'map': float(map50), + 'stage_name': cfg.param.model } - env_config = env.get_current_env() - with open(env_config.output.training_result_file, 'w') as f: + with open(training_result_file, 'w') as f: yaml.safe_dump(training_result, f) diff --git a/live-code-executor/mxnet.dockerfile b/live-code-executor/mxnet.dockerfile index a12e29d..ed08fff 100644 --- a/live-code-executor/mxnet.dockerfile +++ b/live-code-executor/mxnet.dockerfile @@ -16,7 +16,7 @@ ENV PATH /opt/conda/bin:$PATH RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC && \ apt-get update && \ apt-get install -y git gcc wget curl zip libglib2.0-0 libgl1-mesa-glx \ - libsm6 libxext6 libxrender-dev && \ + libsm6 libxext6 libxrender-dev build-essential && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ wget "${MINICONDA_URL}" -O miniconda.sh -q && \ diff --git a/live-code-executor/torch.dockerfile b/live-code-executor/torch.dockerfile index df43f85..4fd9a90 100644 --- a/live-code-executor/torch.dockerfile +++ b/live-code-executor/torch.dockerfile @@ -16,6 +16,7 @@ ENV LANG=C.UTF-8 # install linux package RUN apt-get update && apt-get install -y git curl wget zip gcc \ libglib2.0-0 libgl1-mesa-glx libsm6 libxext6 libxrender-dev \ + build-essential \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* From 8c6b3dcb70986a0de7cab8c846eb9dec09a2e738 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 8 Aug 2022 10:10:08 +0800 Subject: [PATCH 094/204] remove redundant --- det-yolov5-tmi/start.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 4f0648f..a483cc5 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -118,8 +118,6 @@ def _run_training(cfg: edict) -> None: logging.info(f'export onnx weight: {command}') subprocess.run(command.split(), check=True) - # save hyperparameter - shutil.copy(f'models/{model}.yaml', f'{models_dir}/{model}.yaml') write_ymir_training_result(cfg) # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) From 19b40d15785e7b0f62dffd6954f0afa218c23893 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 8 Aug 2022 10:15:17 +0800 Subject: [PATCH 095/204] sort imports --- det-yolov5-tmi/start.py | 10 ++++------ det-yolov5-tmi/utils/ymir_yolov5.py | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index a483cc5..c46f6a0 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -1,20 +1,18 @@ import logging import os -import os.path as osp -import shutil import subprocess import sys import cv2 from easydict import EasyDict as edict +from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, + download_weight_file, get_merged_config, + get_weight_file, get_ymir_process, + write_ymir_training_result) from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw -from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, - download_weight_file, get_merged_config, - get_weight_file, get_ymir_process, write_ymir_training_result) - def start() -> int: cfg = get_merged_config() diff --git 
a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index fec095b..f63a1c4 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -6,7 +6,7 @@ import os.path as osp import shutil from enum import IntEnum -from typing import Any, Dict, List, Tuple +from typing import Any, List import numpy as np import torch From bf4e4ad7d25ba1f114a9c6943ef33f041e3650bd Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 8 Aug 2022 11:55:40 +0800 Subject: [PATCH 096/204] update train.py --- det-yolov5-tmi/train.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index d28fdb8..ac9abd6 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -206,7 +206,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear # Epochs start_epoch = ckpt['epoch'] + 1 if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' + assert start_epoch > 0, f'{weights} training from {start_epoch} to {epochs} epochs is finished, nothing to resume.' if epochs < start_epoch: LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") epochs += ckpt['epoch'] # finetune additional epochs @@ -296,7 +296,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear # ymir monitor if epoch % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=epoch / (epochs - start_epoch + 1)) + percent = get_ymir_process(stage=YmirStage.TASK, p=(epoch - start_epoch + 1) / (epochs - start_epoch + 1)) monitor.write_monitor_logger(percent=percent) # Update image weights (optional, single-GPU only) @@ -523,12 +523,17 @@ def main(opt, callbacks=Callbacks()): check_git_status() check_requirements(exclude=['thop']) + ymir_cfg = get_merged_config() # Resume if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run - ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run(ymir_cfg.ymir.input.root_dir) # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: - opt = argparse.Namespace(**yaml.safe_load(f)) # replace + + opt_file = Path(ckpt).parent / 'opt.yaml' + if opt_file.exists(): + with open(opt_file, errors='ignore') as f: + opt = argparse.Namespace(**yaml.safe_load(f)) # replace + os.makedirs(opt.save_dir, exist_ok=True) opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: @@ -539,8 +544,8 @@ def main(opt, callbacks=Callbacks()): if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve opt.project = str(ROOT / 'runs/evolve') opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) - ymir_cfg = get_merged_config() - opt.ymir_cfg = ymir_cfg + + opt.ymir_cfg = ymir_cfg # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) From 17ee0ed9fba32fead01164bff21f1c38e6a7d15b Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 9 Aug 2022 12:30:04 +0800 Subject: [PATCH 097/204] update readme --- README.MD | 27 +++++++++++++++++++++++---- README_zh-CN.MD | 26 ++++++++++++++++++++++---- det-yolov5-tmi/train.py | 19 
++++++++++++-------
 3 files changed, 57 insertions(+), 15 deletions(-)

diff --git a/README.MD b/README.MD
index 54a8b94..703bde6 100644
--- a/README.MD
+++ b/README.MD
@@ -2,6 +2,8 @@
 - [ymir](https://github.com/IndustryEssentials/ymir)
 
+- [wiki](https://github.com/yzbx/ymir-executor-fork/wiki)
+
 ## ymir-1.1.0 official image
 
 - [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training)
@@ -14,6 +16,8 @@
 - [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi)
 
+  - [change log](./det-yolov5-tmi/README.md)
+
 ```
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi
 
 ```
 
 - [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi)
+
+  - [change log](./det-mmdetection-tmi/README.md)
+
 ```
 
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi
@@ -43,6 +50,22 @@
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi
 ```
 
+- overview
+
+  | docker image | [finetune](https://github.com/yzbx/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weights |
+  | - | - | - | - | - | - | - |
+  | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local |
+  | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online |
+  | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online |
+  | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online |
+  | detectron2 | ? | ✔️ | ✔️ | pytorch | ❌ | online |
+
+  - online pretrained weights may be downloaded over the network during training
+
+  - local pretrained weights are copied into the docker image when the image is built
+
+---
+
 ## det-yolov4-tmi
 
 - yolov4 training, mining and infer docker image, use `mxnet` and `darknet` framework
@@ -56,8 +79,6 @@
 ## det-yolov5-tmi
 
-- [change log](./det-yolov5-tmi/README.md)
-
 - yolov5 training, mining and infer docker image, use `pytorch` framework
 
 ```
 cd det-yolov5-tmi
 docker build -t ymir-executor/yolov5:cuda111-tmi -f cuda111.dockerfile .
@@ -69,8 +90,6 @@
 ## det-mmdetection-tmi
 
-- [change log](./det-mmdetection-tmi/README.md)
-
 ```
 cd det-mmdetection-tmi
 docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 .

diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index 5505b24..0460da4 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -2,6 +2,8 @@
 - [ymir](https://github.com/IndustryEssentials/ymir)
 
+- [wiki](https://github.com/yzbx/ymir-executor-fork/wiki)
+
 ## ymir-1.1.0 官方镜像
 
 - [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training)
@@ -14,6 +16,8 @@
 - [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi)
 
+  - [change log](./det-yolov5-tmi/README.md)
+
 ```
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi
 
 ```
 
 - [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi)
+
+  - [change log](./det-mmdetection-tmi/README.md)
+
 ```
 
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi
@@ -43,6 +50,21 @@
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi
 ```
 
+- 比较
+
+  | docker image | [finetune](https://github.com/yzbx/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weight |
+  | - | - | - | - | - | - | - |
+  | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local |
+  | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online |
+  | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online |
+  | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online |
+  | detectron2 | ?
| ✔️ | ✔️ | pytorch | ❌ | online | + + - online 预训练权重可能在训练时通过网络下载 + + - local 预训练权重在构建镜像时复制到了镜像 +--- + ## det-yolov4-tmi - yolov4的训练、挖掘与推理镜像,采用mxnet与darknet框架 @@ -56,8 +78,6 @@ ## det-yolov5-tmi -- [change log](./det-yolov5-tmi/README.md) - - yolov5训练、挖掘及推理镜像,采用pytorch框架,镜像构建时会从github上下载权重, 如果访问github不稳定, 建议提前将模型权重下载并在构建时复制到镜像中. ``` @@ -69,8 +89,6 @@ docker build -t ymir-executor/yolov5:cuda111-tmi -f cuda111.dockerfile . ## det-mmdetection-tmi -- [change log](./det-mmdetection-tmi/README.md) - ``` cd det-mmdetection-tmi docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 . diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index ac9abd6..e8d3fe6 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -12,7 +12,6 @@ $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch """ -from ymir_exc import monitor import argparse import math import os @@ -32,6 +31,7 @@ from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import SGD, Adam, AdamW, lr_scheduler from tqdm import tqdm +from ymir_exc import monitor FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory @@ -47,17 +47,22 @@ from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.downloads import attempt_download -from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, - check_suffix, check_version, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, - intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, - print_args, print_mutation, strip_optimizer) +from utils.general import (LOGGER, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, + check_version, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, + labels_to_class_weights, labels_to_image_weights, + methods, one_cycle, print_args, print_mutation, + strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first -from utils.ymir_yolov5 import write_ymir_training_result, YmirStage, get_ymir_process, get_merged_config +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, + select_device, torch_distributed_zero_first) +from utils.ymir_yolov5 import (YmirStage, get_merged_config, get_ymir_process, + write_ymir_training_result) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) From e28dfbcc9f8cceafbd2386a7560ef661b0348a0e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 16 Aug 2022 16:31:24 +0800 Subject: [PATCH 098/204] remove ddp destroy_process_group() on train end, https://github.com/ultralytics/yolov5/pull/8935 --- README.MD | 2 +- README_zh-CN.MD | 2 +- det-yolov5-tmi/train.py | 3 --- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/README.MD b/README.MD index 703bde6..50ce730 100644 --- a/README.MD +++ b/README.MD @@ -58,7 +58,7 @@ | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | - | detectron2 | ? 
| ✔️ | ✔️ | pytorch | ❌ | online |
+ | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online |
diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py
index e8d3fe6..bc7a182 100644
--- a/det-yolov5-tmi/train.py
+++ b/det-yolov5-tmi/train.py
@@ -568,9 +568,6 @@ def main(opt, callbacks=Callbacks()):
     # Train
     if not opt.evolve:
         train(opt.hyp, opt, device, callbacks)
-        if WORLD_SIZE > 1 and RANK == 0:
-            LOGGER.info('Destroying process group... ')
-            dist.destroy_process_group()
 
     # Evolve hyperparameters (optional)
     else:

From fb66ee90c0edaac12cf2fd841b5fb2c607444b43 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Mon, 22 Aug 2022 14:00:08 +0800
Subject: [PATCH 099/204] add multi-gpu data loader

---
 README_zh-CN.MD                      |   4 +
 det-yolov5-tmi/cuda111.dockerfile    |   9 ++-
 det-yolov5-tmi/mining/ymir_mining.py | 110 +++++++++++++++++++++++++++
 det-yolov5-tmi/start.py              |  71 ++++++++++-------
 det-yolov5-tmi/utils/ymir_yolov5.py  |  80 +++++++++----------
 5 files changed, 200 insertions(+), 74 deletions(-)
 create mode 100644 det-yolov5-tmi/mining/ymir_mining.py

diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index 6f375ec..83e7f57 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -232,3 +232,7 @@ tail -f -n 100 ymir_app.log
 - [Paper with Code: Object Detection](https://paperswithcode.com/task/object-detection)
 
 - [awesome object detection](https://github.com/amusi/awesome-object-detection)
+
+  - [voc2012 object detection leaderboard](http://host.robots.ox.ac.uk:8080/leaderboard/displaylb.php?challengeid=11&compid=4)
+
+  - [coco object detection leaderboard](https://cocodataset.org/#detection-leaderboard)
diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile
index 6cfff64..5d1e165 100644
--- a/det-yolov5-tmi/cuda111.dockerfile
+++ b/det-yolov5-tmi/cuda111.dockerfile
@@ -21,13 +21,14 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# install ymir-exc sdk
-RUN pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0"
+COPY ./requirements.txt /workspace/
+# install ymir-exc sdk and requirements
+RUN pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" \
+    && pip install -r /workspace/requirements.txt
 
 # Copy file from host to docker and install requirements
 COPY . /app
-RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \
-    && pip install -r /app/requirements.txt
+RUN mkdir /img-man && mv /app/*-template.yaml /img-man/
 
 # Download pretrained weight and font file
 RUN cd /app && bash data/scripts/download_weights.sh \
diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py
new file mode 100644
index 0000000..5f7d319
--- /dev/null
+++ b/det-yolov5-tmi/mining/ymir_mining.py
@@ -0,0 +1,110 @@
+"""run.py:
+img --(model)--> pred --(augmentation)--> (aug1_pred, aug2_pred, ..., augN_pred)
+img --(augmentation)--> aug1_img --(model)--> pred1
+img --(augmentation)--> aug2_img --(model)--> pred2
+...
+img --(augmentation)--> augN_img --(model)--> predN + +dataload(img) --(model)--> pred +dataload(img, pred) --(augmentation1)--> (aug1_img, aug1_pred) --(model)--> pred1 + +1. split dataset with DDP sampler +2. use DDP model to infer sampled dataloader +3. gather infer result + +""" +import os +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.utils.data as td +from functools import partial +from typing import List, Any +import cv2 +from utils.augmentations import letterbox +import numpy as np +from ymir_exc.util import get_merged_config +from utils.ymir_yolov5 import YmirYolov5 + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def load_image_file(img_file: str, img_size, stride): + img = cv2.imread(img_file) + img1 = letterbox(img, img_size, stride=stride, auto=True)[0] + + # preprocess: convert data format + img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img1 = np.ascontiguousarray(img1) + # img1 = torch.from_numpy(img1).to(self.device) + + img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 + # img1.unsqueeze_(dim=0) # expand for batch dim + return img1 + + +class YmirDataset(td.Dataset): + + def __init__(self, images: List[str], annotations: List[Any] = None, augmentations=None, load_fn=None): + super().__init__() + self.annotations = annotations + self.images = images + self.augmentations = augmentations + self.load_fn = load_fn + + def __getitem__(self, index): + + return self.load_fn(self.images[index]) + + def __len__(self): + return len(self.images) + + +def run(rank, size): + """ Distributed function to be implemented later. """ + cfg = get_merged_config() + model = YmirYolov5(cfg) + + load_fn = partial(load_image_file, img_size=model.img_size, stride=model.stride) + + with open(cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + # origin dataset + origin_dataset = YmirDataset(images, load_fn=load_fn) + + sampler = None if rank == -1 else td.distributed.DistributedSampler(origin_dataset) + origin_dataset_loader = td.Dataloader(origin_dataset, + batch_size=4, + shuffle=False, + sampler=sampler, + num_workers=0, + pip_memory=True, + drop_last=False) + + + for batch in origin_dataset_loader: + + + +def init_process(rank, size, fn, backend='gloo'): + """ Initialize the distributed environment. 
""" + os.environ['MASTER_ADDR'] = '127.0.0.1' + os.environ['MASTER_PORT'] = '29500' + dist.init_process_group(backend, rank=rank, world_size=size) + fn(rank, size) + + +if __name__ == "__main__": + size = 2 + processes = [] + mp.set_start_method("spawn") + for rank in range(size): + p = mp.Process(target=init_process, args=(rank, size, run)) + p.start() + processes.append(p) + + for p in processes: + p.join() diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index c46f6a0..01be78a 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -5,10 +5,10 @@ import cv2 from easydict import EasyDict as edict +from models.experimental import attempt_download from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, - download_weight_file, get_merged_config, - get_weight_file, get_ymir_process, - write_ymir_training_result) + get_merged_config, get_weight_file, + get_ymir_process, write_ymir_training_result) from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw @@ -51,7 +51,8 @@ def _run_training(cfg: edict) -> None: out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) # 2. training model epochs = cfg.param.epochs @@ -75,7 +76,7 @@ def _run_training(cfg: edict) -> None: weights = get_weight_file(cfg) if not weights: # download pretrained weight - weights = download_weight_file(model) + weights = attempt_download(f'{model}.pt') models_dir = cfg.ymir.output.models_dir @@ -86,18 +87,18 @@ def _run_training(cfg: edict) -> None: device = gpu_id else: device = gpu_id - commands += f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split() - - commands += ['train.py', - '--epochs', str(epochs), - '--batch-size', str(batch_size), - '--data', f'{out_dir}/data.yaml', - '--project', '/out', - '--cfg', f'models/{model}.yaml', - '--name', 'models', '--weights', weights, - '--img-size', str(img_size), - '--save-period', str(save_period), - '--device', device] + commands += f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split( + ) + + commands += [ + 'train.py', '--epochs', + str(epochs), '--batch-size', + str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', '/out', + '--cfg', f'models/{model}.yaml', '--name', 'models', '--weights', + weights, '--img-size', + str(img_size), '--save-period', + str(save_period), '--device', device + ] if gpu_count > 1 and sync_bn: commands.append("--sync-bn") @@ -108,7 +109,8 @@ def _run_training(cfg: edict) -> None: logging.info(f'start training: {commands}') subprocess.run(commands, check=True) - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) # 3. 
convert to onnx and save model weight to design directory opset = cfg.param.opset @@ -126,14 +128,20 @@ def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process( - stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, + p=1.0, + task_idx=task_idx, + task_num=task_num)) command = 'python3 mining/mining_cald.py' logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) - monitor.write_monitor_logger(percent=get_ymir_process( - stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.POSTPROCESS, + p=1.0, + task_idx=task_idx, + task_num=task_num)) def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: @@ -141,8 +149,11 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process( - stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, + p=1.0, + task_idx=task_idx, + task_num=task_num)) N = dr.items_count(env.DatasetType.CANDIDATE) infer_result = dict() @@ -157,12 +168,18 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, task_idx=task_idx, task_num=task_num) + percent = get_ymir_process(stage=YmirStage.TASK, + p=idx / N, + task_idx=task_idx, + task_num=task_num) monitor.write_monitor_logger(percent=percent) rw.write_infer_result(infer_result=infer_result) - monitor.write_monitor_logger(percent=get_ymir_process( - stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, + p=1.0, + task_idx=task_idx, + task_num=task_num)) if __name__ == '__main__': diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index f63a1c4..7257ed1 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -13,11 +13,11 @@ import yaml from easydict import EasyDict as edict from models.common import DetectMultiBackend -from models.experimental import attempt_download +from torch.nn.parallel import DistributedDataParallel as DDP from nptyping import NDArray, Shape, UInt8 from packaging.version import Version from utils.augmentations import letterbox -from utils.general import check_img_size, non_max_suppression, scale_coords +from utils.general import check_img_size, non_max_suppression, scale_coords, check_version from utils.torch_utils import select_device from ymir_exc import env from ymir_exc import result_writer as rw @@ -25,7 +25,7 @@ class YmirStage(IntEnum): PREPROCESS = 1 # convert dataset - TASK = 2 # training/mining/infer + TASK = 2 # training/mining/infer POSTPROCESS = 3 # export model @@ -85,8 +85,9 @@ def get_weight_file(cfg: edict) -> str: model_params_path = cfg.param.model_params_path model_dir = cfg.ymir.input.models_dir - model_params_path = [osp.join(model_dir, p) - for p in model_params_path if 
osp.exists(osp.join(model_dir, p)) and p.endswith('.pt')] + model_params_path = [ + osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pt') + ] # choose weight file by priority, best.pt > xxx.pt for p in model_params_path: @@ -99,12 +100,7 @@ def get_weight_file(cfg: edict) -> str: return "" -def download_weight_file(model_name): - weights = attempt_download(f'{model_name}.pt') - return weights - - -class YmirYolov5(): +class YmirYolov5(object): """ used for mining and inference to init detector and predict. """ @@ -145,11 +141,23 @@ def init_detector(self, device: torch.device) -> DetectMultiBackend: raise Exception("no weights file specified!") data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') - model = DetectMultiBackend(weights=weights, - device=device, - dnn=False, # not use opencv dnn for onnx inference - data=data_yaml) # dataset.yaml path - + model = DetectMultiBackend( + weights=weights, + device=device, + dnn=False, # not use opencv dnn for onnx inference + data=data_yaml) # dataset.yaml path + + if ddp: + LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html + RANK = int(os.getenv('RANK', -1)) + # WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + cuda = device.type != 'cpu' + if cuda and RANK != -1: + if check_version(torch.__version__, '1.11.0'): + model.model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, + static_graph=True) # type: ignore + else: + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) # type: ignore return model def predict(self, img: CV_IMAGE) -> NDArray: @@ -200,23 +208,22 @@ def infer(self, img: CV_IMAGE) -> List[rw.Annotation]: for i in range(result.shape[0]): xmin, ymin, xmax, ymax, conf, cls = result[i, :6].tolist() - ann = rw.Annotation(class_name=self.class_names[int(cls)], score=conf, box=rw.Box( - x=int(xmin), y=int(ymin), w=int(xmax - xmin), h=int(ymax - ymin))) + ann = rw.Annotation(class_name=self.class_names[int(cls)], + score=conf, + box=rw.Box(x=int(xmin), y=int(ymin), w=int(xmax - xmin), h=int(ymax - ymin))) anns.append(ann) return anns -def convert_ymir_to_yolov5(cfg: edict) -> None: +def convert_ymir_to_yolov5(cfg: edict): """ convert ymir format dataset to yolov5 format generate data.yaml for training/mining/infer """ - data = dict(path=cfg.ymir.output.root_dir, - nc=len(cfg.param.class_names), - names=cfg.param.class_names) + data = dict(path=cfg.ymir.output.root_dir, nc=len(cfg.param.class_names), names=cfg.param.class_names) for split, prefix in zip(['train', 'val', 'test'], ['training', 'val', 'candidate']): src_file = getattr(cfg.ymir.input, f'{prefix}_index_file') if osp.exists(src_file): @@ -228,10 +235,7 @@ def convert_ymir_to_yolov5(cfg: edict) -> None: fw.write(yaml.safe_dump(data)) -def write_ymir_training_result(cfg: edict, - map50: float = 0.0, - epoch: int = 0, - weight_file: str = "") -> int: +def write_ymir_training_result(cfg: edict, map50: float = 0.0, epoch: int = 0, weight_file: str = ""): YMIR_VERSION = os.getenv('YMIR_VERSION', '1.2.0') if Version(YMIR_VERSION) >= Version('1.2.0'): _write_latest_ymir_training_result(cfg, float(map50), epoch, weight_file) @@ -239,10 +243,7 @@ def write_ymir_training_result(cfg: edict, _write_ancient_ymir_training_result(cfg, float(map50)) -def _write_latest_ymir_training_result(cfg: edict, - map50: float, - epoch: int, - weight_file: str) -> int: +def _write_latest_ymir_training_result(cfg: edict, map50: float, epoch: int, weight_file: str) -> 
int: """ for ymir>=1.2.0 cfg: ymir config @@ -257,13 +258,12 @@ def _write_latest_ymir_training_result(cfg: edict, model = cfg.param.model # use `rw.write_training_result` to save training result if weight_file: - rw.write_model_stage(stage_name=f"{model}_{epoch}", - files=[osp.basename(weight_file)], - mAP=float(map50)) + rw.write_model_stage(stage_name=f"{model}_{epoch}", files=[osp.basename(weight_file)], mAP=float(map50)) else: # save other files with - files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*')) - if not f.endswith('.pt')] + ['last.pt', 'best.pt'] + files = [ + osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*')) if not f.endswith('.pt') + ] + ['last.pt', 'best.pt'] training_result_file = cfg.ymir.output.training_result_file if osp.exists(training_result_file): @@ -271,9 +271,7 @@ def _write_latest_ymir_training_result(cfg: edict, training_result = yaml.safe_load(stream=f) map50 = max(training_result.get('map', 0.0), map50) - rw.write_model_stage(stage_name=f"{model}_last_and_best", - files=files, - mAP=float(map50)) + rw.write_model_stage(stage_name=f"{model}_last_and_best", files=files, mAP=float(map50)) return 0 @@ -291,11 +289,7 @@ def _write_ancient_ymir_training_result(cfg: edict, map50: float) -> None: training_result['model'] = files training_result['map'] = max(float(training_result.get('map', 0)), map50) else: - training_result = { - 'model': files, - 'map': float(map50), - 'stage_name': cfg.param.model - } + training_result = {'model': files, 'map': float(map50), 'stage_name': cfg.param.model} with open(training_result_file, 'w') as f: yaml.safe_dump(training_result, f) From a5d81c8c7647870d9abeac7c279c147084a284c3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 25 Aug 2022 18:59:58 +0800 Subject: [PATCH 100/204] add multi-gpu infer and mining --- det-mmdetection-tmi/ymir_infer.py | 2 +- det-yolov5-tmi/infer-template.yaml | 3 + det-yolov5-tmi/mining-template.yaml | 3 + det-yolov5-tmi/mining/mining_cald.py | 7 +- det-yolov5-tmi/mining/util.py | 132 +++++++++++++ det-yolov5-tmi/mining/ymir_infer.py | 128 +++++++++++++ det-yolov5-tmi/mining/ymir_mining.py | 255 +++++++++++++++++--------- det-yolov5-tmi/mypy.ini | 4 +- det-yolov5-tmi/start.py | 155 +++++++--------- det-yolov5-tmi/train.py | 22 +-- det-yolov5-tmi/training-template.yaml | 2 +- det-yolov5-tmi/utils/ymir_yolov5.py | 159 ++++++---------- 12 files changed, 569 insertions(+), 303 deletions(-) create mode 100644 det-yolov5-tmi/mining/util.py create mode 100644 det-yolov5-tmi/mining/ymir_infer.py diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index ecec19e..661b2ea 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -100,7 +100,7 @@ def __init__(self, cfg: edict): cfg_options = parse_option(options) if options else None # current infer can only use one gpu!!! 
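+        # 'gpu_id' may be absent from the executor config, so read it
+        # with a default of '0' below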
- gpu_ids = cfg.param.gpu_id + gpu_ids = cfg.param.get('gpu_id', '0') gpu_id = gpu_ids.split(',')[0] # build the model from a config file and a checkpoint file self.model = init_detector( diff --git a/det-yolov5-tmi/infer-template.yaml b/det-yolov5-tmi/infer-template.yaml index 89dcc96..008375c 100644 --- a/det-yolov5-tmi/infer-template.yaml +++ b/det-yolov5-tmi/infer-template.yaml @@ -10,3 +10,6 @@ img_size: 640 conf_thres: 0.25 iou_thres: 0.45 +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +ddp: False diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml index 20106dc..78e13e7 100644 --- a/det-yolov5-tmi/mining-template.yaml +++ b/det-yolov5-tmi/mining-template.yaml @@ -10,3 +10,6 @@ img_size: 640 conf_thres: 0.25 iou_thres: 0.45 +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +ddp: False diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py index 0fde401..0e08660 100644 --- a/det-yolov5-tmi/mining/mining_cald.py +++ b/det-yolov5-tmi/mining/mining_cald.py @@ -6,17 +6,18 @@ from typing import Dict, List, Tuple import cv2 -from easydict import EasyDict as edict import numpy as np +from easydict import EasyDict as edict from nptyping import NDArray from scipy.stats import entropy from tqdm import tqdm from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate -from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5, YmirStage, get_ymir_process, get_merged_config +from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5 def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: @@ -49,7 +50,7 @@ def __init__(self, cfg: edict): def mining(self) -> List: N = dr.items_count(env.DatasetType.CANDIDATE) - monitor_gap = max(1, N // 100) + monitor_gap = max(1, N // 1000) idx = -1 beta = 1.3 mining_result = [] diff --git a/det-yolov5-tmi/mining/util.py b/det-yolov5-tmi/mining/util.py new file mode 100644 index 0000000..41c7c73 --- /dev/null +++ b/det-yolov5-tmi/mining/util.py @@ -0,0 +1,132 @@ +"""run.py: +img --(model)--> pred --(augmentation)--> (aug1_pred, aug2_pred, ..., augN_pred) +img --(augmentation)--> aug1_img --(model)--> pred1 +img --(augmentation)--> aug2_img --(model)--> pred2 +... +img --(augmentation)--> augN_img --(model)--> predN + +dataload(img) --(model)--> pred +dataload(img, pred) --(augmentation1)--> (aug1_img, aug1_pred) --(model)--> pred1 + +1. split dataset with DDP sampler +2. use DDP model to infer sampled dataloader +3. 
gather infer result + +""" +import os +from typing import Any, List + +import cv2 +import numpy as np +import torch.utils.data as td +from scipy.stats import entropy +from torch.utils.data._utils.collate import default_collate + +from mining.data_augment import cutout, horizontal_flip, resize, rotate +from mining.mining_cald import get_ious +from utils.augmentations import letterbox + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def preprocess(img, img_size, stride): + img1 = letterbox(img, img_size, stride=stride, auto=False)[0] + + # preprocess: convert data format + img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img1 = np.ascontiguousarray(img1) + # img1 = torch.from_numpy(img1).to(self.device) + + img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 + return img1 + + +def load_image_file(img_file: str, img_size, stride): + img = cv2.imread(img_file) + img1 = letterbox(img, img_size, stride=stride, auto=False)[0] + + # preprocess: convert data format + img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img1 = np.ascontiguousarray(img1) + # img1 = torch.from_numpy(img1).to(self.device) + + img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 + # img1.unsqueeze_(dim=0) # expand for batch dim + return dict(image=img1, origin_shape=img.shape[0:2], image_file=img_file) + # return img1 + + +def load_image_file_with_ann(image_info: dict, img_size, stride): + img_file = image_info['image_file'] + # xyxy(int) conf(float) class_index(int) + bboxes = image_info['results'][:, :4].astype(np.int32) + img = cv2.imread(img_file) + aug_dict = dict(flip=horizontal_flip, cutout=cutout, rotate=rotate, resize=resize) + + data = dict(image_file=img_file, origin_shape=img.shape[0:2]) + for key in aug_dict: + aug_img, aug_bbox = aug_dict[key](img, bboxes) + preprocess_aug_img = preprocess(aug_img, img_size, stride) + data[f'image_{key}'] = preprocess_aug_img + data[f'bboxes_{key}'] = aug_bbox + data[f'origin_shape_{key}'] = aug_img.shape[0:2] + + data.update(image_info) + return data + + +def collate_fn_with_fake_ann(batch): + new_batch = dict() + for key in ['flip', 'cutout', 'rotate', 'resize']: + new_batch[f'bboxes_{key}_list'] = [data[f'bboxes_{key}'] for data in batch] + + new_batch[f'image_{key}'] = default_collate([data[f'image_{key}'] for data in batch]) + + new_batch[f'origin_shape_{key}'] = default_collate([data[f'origin_shape_{key}'] for data in batch]) + + new_batch['results_list'] = [data['results'] for data in batch] + new_batch['image_file'] = [data['image_file'] for data in batch] + + return new_batch + + +def update_consistency(consistency, consistency_per_aug, beta, pred_bboxes_key, pred_conf_key, aug_bboxes_key, + aug_conf): + cls_scores_aug = 1 - pred_conf_key + cls_scores = 1 - aug_conf + + consistency_per_aug = 2.0 + ious = get_ious(pred_bboxes_key, aug_bboxes_key) + aug_idxs = np.argmax(ious, axis=0) + for origin_idx, aug_idx in enumerate(aug_idxs): + max_iou = ious[aug_idx, origin_idx] + if max_iou == 0: + consistency_per_aug = min(consistency_per_aug, beta) + p = cls_scores_aug[aug_idx] + q = cls_scores[origin_idx] + m = (p + q) / 2. 
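+        # js is the Jensen-Shannon divergence between the origin and
+        # augmented confidence scores: a symmetric, smoothed variant of
+        # KL divergence, folded into the consistency score below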
+        js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m)
+        if js < 0:
+            js = 0
+        consistency_box = max_iou
+        consistency_cls = 0.5 * (aug_conf[origin_idx] + pred_conf_key[aug_idx]) * (1 - js)
+        consistency_per_inst = abs(consistency_box + consistency_cls - beta)
+        consistency_per_aug = min(consistency_per_aug, consistency_per_inst.item())
+
+    consistency += consistency_per_aug
+    return consistency
+
+
+class YmirDataset(td.Dataset):
+    def __init__(self, images: List[Any], load_fn=None):
+        super().__init__()
+        self.images = images
+        self.load_fn = load_fn
+
+    def __getitem__(self, index):
+        return self.load_fn(self.images[index])
+
+    def __len__(self):
+        return len(self.images)
diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py
new file mode 100644
index 0000000..9f459f0
--- /dev/null
+++ b/det-yolov5-tmi/mining/ymir_infer.py
@@ -0,0 +1,127 @@
+"""use fake DDP to infer
+1. split data with `images_rank = images[RANK::WORLD_SIZE]`
+2. save split results with `torch.save(results, f'results_{RANK}.pt')`
+3. merge result
+"""
+import os
+import warnings
+from functools import partial
+
+import torch
+import torch.distributed as dist
+import torch.utils.data as td
+from tqdm import tqdm
+from ymir_exc import result_writer as rw
+from ymir_exc.util import YmirStage, get_merged_config
+
+from mining.util import YmirDataset, load_image_file
+from utils.general import scale_coords
+from utils.ymir_yolov5 import YmirYolov5
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+
+
+def run(ymir_cfg, ymir_yolov5):
+    # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5.
+    gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK])
+    device = torch.device('cuda', gpu)
+    ymir_yolov5.to(device)
+
+    load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride)
+    batch_size_per_gpu = ymir_yolov5.batch_size_per_gpu
+    gpu_count = ymir_yolov5.gpu_count
+    num_workers_per_gpu = min([
+        os.cpu_count() // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0,
+        ymir_yolov5.num_workers_per_gpu
+    ])
+
+    with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f:
+        images = [line.strip() for line in f.readlines()]
+
+    # origin dataset
+    images_rank = images[RANK::WORLD_SIZE]
+    origin_dataset = YmirDataset(images_rank, load_fn=load_fn)
+    origin_dataset_loader = td.DataLoader(origin_dataset,
+                                          batch_size=batch_size_per_gpu,
+                                          shuffle=False,
+                                          sampler=None,
+                                          num_workers=num_workers_per_gpu,
+                                          pin_memory=False,
+                                          drop_last=False)
+
+    results = []
+    dataset_size = len(images_rank)
+    monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu)
+    pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader
+    for idx, batch in enumerate(pbar):
+        with torch.no_grad():
+            pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True)
+
+        if idx % monitor_gap == 0:
+            ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size)
+
+        preprocess_image_shape = batch['image'].shape[2:]
+        for det_idx, det in enumerate(pred):  # per image
+            result_per_image = []
+            image_file = batch['image_file'][det_idx]
+            if len(det):
+                origin_image_shape = (batch['origin_shape'][0][det_idx], batch['origin_shape'][1][det_idx])
+                # Rescale boxes from the letterboxed size back to the origin image size
+                det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round()
+                result_per_image.append(det)
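+            # keep one entry per image, even when the detector returns nothing,
+            # so the per-rank result files can be merged by image file later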
+            results.append(dict(image_file=image_file, result=result_per_image))
+
+    torch.save(results, f'/out/infer_results_{RANK}.pt')
+
+
+def main():
+    ymir_cfg = get_merged_config()
+    ymir_yolov5 = YmirYolov5(ymir_cfg, task='infer')
+
+    if LOCAL_RANK != -1:
+        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+        gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK])
+        torch.cuda.set_device(gpu)
+        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
+
+    run(ymir_cfg, ymir_yolov5)
+
+    dist.barrier()
+
+    if RANK in [0, -1]:
+        results = []
+        for rank in range(WORLD_SIZE):
+            results.append(torch.load(f'/out/infer_results_{rank}.pt'))
+
+        torch.save(results, '/out/infer_results_all_rank.pt')
+
+        ymir_infer_result = dict()
+        for result in results:
+            for img_data in result:
+                img_file = img_data['image_file']
+                anns = []
+                for each_det in img_data['result']:
+                    each_det_np = each_det.data.cpu().numpy()
+                    for i in range(each_det_np.shape[0]):
+                        xmin, ymin, xmax, ymax, conf, cls = each_det_np[i, :6].tolist()
+                        if conf < ymir_yolov5.conf_thres:
+                            continue
+                        if int(cls) >= len(ymir_yolov5.class_names):
+                            warnings.warn(f'class index {int(cls)} out of range for {ymir_yolov5.class_names}')
+                            continue
+                        ann = rw.Annotation(class_name=ymir_yolov5.class_names[int(cls)],
+                                            score=conf,
+                                            box=rw.Box(x=int(xmin), y=int(ymin), w=int(xmax - xmin),
+                                                       h=int(ymax - ymin)))
+                        anns.append(ann)
+                ymir_infer_result[img_file] = anns
+        rw.write_infer_result(infer_result=ymir_infer_result)
+
+    print(f'rank: {RANK}, start destroy process group')
+    dist.destroy_process_group()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py
index 5f7d319..e58264b 100644
--- a/det-yolov5-tmi/mining/ymir_mining.py
+++ b/det-yolov5-tmi/mining/ymir_mining.py
@@ -1,29 +1,24 @@
-"""run.py:
-img --(model)--> pred --(augmentation)--> (aug1_pred, aug2_pred, ..., augN_pred)
-img --(augmentation)--> aug1_img --(model)--> pred1
-img --(augmentation)--> aug2_img --(model)--> pred2
-...
-img --(augmentation)--> augN_img --(model)--> predN
-
-dataload(img) --(model)--> pred
-dataload(img, pred) --(augmentation1)--> (aug1_img, aug1_pred) --(model)--> pred1
-
-1. split dataset with DDP sampler
-2. use DDP model to infer sampled dataloader
-3. gather infer result
-
+"""use fake DDP to infer
+1. split data with `images_rank = images[RANK::WORLD_SIZE]`
+2. infer on the origin dataset
+3. infer on the augmentation dataset
+4. save split mining results with `torch.save(results, f'/out/mining_results_{RANK}.pt')`
+5.
merge mining result """ import os +from functools import partial + +import numpy as np import torch import torch.distributed as dist -import torch.multiprocessing as mp import torch.utils.data as td -from functools import partial -from typing import List, Any -import cv2 -from utils.augmentations import letterbox -import numpy as np -from ymir_exc.util import get_merged_config +from tqdm import tqdm +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, + update_consistency) +from utils.general import scale_coords from utils.ymir_yolov5 import YmirYolov5 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -31,80 +26,160 @@ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def load_image_file(img_file: str, img_size, stride): - img = cv2.imread(img_file) - img1 = letterbox(img, img_size, stride=stride, auto=True)[0] - - # preprocess: convert data format - img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img1 = np.ascontiguousarray(img1) - # img1 = torch.from_numpy(img1).to(self.device) - - img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 - # img1.unsqueeze_(dim=0) # expand for batch dim - return img1 - - -class YmirDataset(td.Dataset): - - def __init__(self, images: List[str], annotations: List[Any] = None, augmentations=None, load_fn=None): - super().__init__() - self.annotations = annotations - self.images = images - self.augmentations = augmentations - self.load_fn = load_fn +def run(ymir_cfg, ymir_yolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. + gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) - def __getitem__(self, index): + load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + batch_size_per_gpu = ymir_yolov5.batch_size_per_gpu + gpu_count = ymir_yolov5.gpu_count + num_workers_per_gpu = min([ + os.cpu_count() // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + ymir_yolov5.num_workers_per_gpu + ]) - return self.load_fn(self.images[index]) - - def __len__(self): - return len(self.images) - - -def run(rank, size): - """ Distributed function to be implemented later. """ - cfg = get_merged_config() - model = YmirYolov5(cfg) - - load_fn = partial(load_image_file, img_size=model.img_size, stride=model.stride) - - with open(cfg.ymir.input.candidate_index_file, 'r') as f: + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] # origin dataset - origin_dataset = YmirDataset(images, load_fn=load_fn) - - sampler = None if rank == -1 else td.distributed.DistributedSampler(origin_dataset) - origin_dataset_loader = td.Dataloader(origin_dataset, - batch_size=4, + images_rank = images[RANK::WORLD_SIZE] + origin_dataset = YmirDataset(images_rank, load_fn=load_fn) + origin_dataset_loader = td.DataLoader(origin_dataset, + batch_size=batch_size_per_gpu, shuffle=False, - sampler=sampler, - num_workers=0, - pip_memory=True, + sampler=None, + num_workers=num_workers_per_gpu, + pin_memory=False, drop_last=False) - - for batch in origin_dataset_loader: - - - -def init_process(rank, size, fn, backend='gloo'): - """ Initialize the distributed environment. 
""" - os.environ['MASTER_ADDR'] = '127.0.0.1' - os.environ['MASTER_PORT'] = '29500' - dist.init_process_group(backend, rank=rank, world_size=size) - fn(rank, size) - - -if __name__ == "__main__": - size = 2 - processes = [] - mp.set_start_method("spawn") - for rank in range(size): - p = mp.Process(target=init_process, args=(rank, size, run)) - p.start() - processes.append(p) - - for p in processes: - p.join() + results = [] + mining_results = dict() + beta = 1.3 + dataset_size = len(images_rank) + pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + for idx, batch in enumerate(pbar): + with torch.no_grad(): + pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) + + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx / dataset_size) + preprocess_image_shape = batch['image'].shape[2:] + for inner_idx, det in enumerate(pred): # per image + result_per_image = [] + if len(det): + origin_image_shape = (batch['origin_shape'][0][inner_idx], batch['origin_shape'][1][inner_idx]) + image_file = batch['image_file'][inner_idx] + # Rescale boxes from img_size to img size + det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() + result_per_image.append(det) + else: + mining_results[image_file] = -beta + continue + + results_per_image = torch.cat(result_per_image, dim=0).data.cpu().numpy() + results.append(dict(image_file=image_file, origin_shape=origin_image_shape, results=results_per_image)) + + aug_load_fn = partial(load_image_file_with_ann, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + aug_dataset = YmirDataset(results, load_fn=aug_load_fn) + aug_dataset_loader = td.DataLoader(aug_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + collate_fn=collate_fn_with_fake_ann, + num_workers=num_workers_per_gpu, + pin_memory=False, + drop_last=False) + + dataset_size = len(results) + monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) + pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader + for idx, batch in enumerate(pbar): + if idx % monitor_gap == 0: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + + batch_consistency = [0.0 for _ in range(len(batch['image_file']))] + aug_keys = ['flip', 'cutout', 'rotate', 'resize'] + + pred_result = dict() + for key in aug_keys: + with torch.no_grad(): + pred_result[key] = ymir_yolov5.forward(batch[f'image_{key}'].float().to(device), nms=True) + + for inner_idx in range(len(batch['image_file'])): + for key in aug_keys: + preprocess_image_shape = batch[f'image_{key}'].shape[2:] + result_per_image = [] + det = pred_result[key][inner_idx] + if len(det) == 0: + # no result for the image with augmentation f'{key}' + batch_consistency[inner_idx] += beta + continue + + # prediction result from origin image + fake_ann = batch['results_list'][inner_idx] + # bboxes = fake_ann[:, :4].data.cpu().numpy().astype(np.int32) + conf = fake_ann[:, 4] + + # augmentated bbox from bboxes, aug_conf = conf + aug_bboxes_key = batch[f'bboxes_{key}_list'][inner_idx].astype(np.int32) + + origin_image_shape = (batch[f'origin_shape_{key}'][0][inner_idx], + batch[f'origin_shape_{key}'][1][inner_idx]) + + # Rescale boxes from img_size to img size + det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() + result_per_image.append(det) + + pred_bboxes_key = det[:, :4].data.cpu().numpy().astype(np.int32) + pred_conf_key = det[:, 4].data.cpu().numpy() + 
batch_consistency[inner_idx] = update_consistency(consistency=batch_consistency[inner_idx], + consistency_per_aug=2.0, + beta=beta, + pred_bboxes_key=pred_bboxes_key, + pred_conf_key=pred_conf_key, + aug_bboxes_key=aug_bboxes_key, + aug_conf=conf) + + for inner_idx in range(len(batch['image_file'])): + batch_consistency[inner_idx] /= len(aug_keys) + image_file = batch['image_file'][inner_idx] + mining_results[image_file] = batch_consistency[inner_idx] + + torch.save(mining_results, f'mining_results_{RANK}.pt') + + +def main(): + ymir_cfg = get_merged_config() + ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + torch.cuda.set_device(gpu) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + results.append(torch.load(f'mining_results_{rank}.pt')) + + torch.save(results, 'mining_results_all_rank.pt') + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + + print(f'rank: {RANK}, start destroy process group') + dist.destroy_process_group() + + +if __name__ == '__main__': + main() diff --git a/det-yolov5-tmi/mypy.ini b/det-yolov5-tmi/mypy.ini index 85e751a..bb96738 100644 --- a/det-yolov5-tmi/mypy.ini +++ b/det-yolov5-tmi/mypy.ini @@ -1,8 +1,8 @@ [mypy] ignore_missing_imports = True disallow_untyped_defs = False -files = [mining/*.py, utils/ymir_yolov5.py, start.py, train.py] -exclude = [utils/general.py] +exclude = [utils/general.py, models/*.py, utils/*.py] +files = mining/*.py, utils/ymir_yolov5.py, start.py, train.py [mypy-torch.*] ignore_errors = True diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 01be78a..6b75b55 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -5,13 +5,14 @@ import cv2 from easydict import EasyDict as edict -from models.experimental import attempt_download -from utils.ymir_yolov5 import (YmirStage, YmirYolov5, convert_ymir_to_yolov5, - get_merged_config, get_weight_file, - get_ymir_process, write_ymir_training_result) from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw +from ymir_exc.util import (YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process, + write_ymir_training_result) + +from models.experimental import attempt_download +from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file def start() -> int: @@ -51,27 +52,20 @@ def _run_training(cfg: edict) -> None: out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) # 2. 
training model - epochs = cfg.param.epochs - batch_size = cfg.param.batch_size - model = cfg.param.model - img_size = cfg.param.img_size - save_period = max(1, min(epochs // 10, int(cfg.param.save_period))) - args_options = cfg.param.args_options - gpu_id = str(cfg.param.gpu_id) - gpu_count = len(gpu_id.split(',')) if gpu_id else 0 - port = int(cfg.param.get('port', 29500)) - sync_bn = cfg.param.get('sync_bn', False) - if isinstance(sync_bn, str): - if sync_bn.lower() in ['f', 'false']: - sync_bn = False - elif sync_bn.lower() in ['t', 'true']: - sync_bn = True - else: - raise Exception(f'unknown bool str sync_bn = {sync_bn}') + epochs: int = int(cfg.param.epochs) + batch_size_per_gpu: int = int(cfg.param.batch_size_per_gpu) + model: str = cfg.param.model + img_size: int = int(cfg.param.img_size) + save_period: int = max(1, min(epochs // 10, int(cfg.param.save_period))) + args_options: str = cfg.param.args_options + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 + batch_size: int = batch_size_per_gpu * max(1, gpu_count) + port: int = int(cfg.param.get('port', 29500)) + sync_bn: bool = get_bool(cfg, key='sync_bn', default_value=False) weights = get_weight_file(cfg) if not weights: @@ -79,38 +73,34 @@ def _run_training(cfg: edict) -> None: weights = attempt_download(f'{model}.pt') models_dir = cfg.ymir.output.models_dir + project = os.path.dirname(models_dir) + name = os.path.basename(models_dir) + assert os.path.join(project, name) == models_dir commands = ['python3'] - if gpu_count == 0: - device = 'cpu' - elif gpu_count == 1: - device = gpu_id - else: - device = gpu_id - commands += f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split( - ) + device = gpu_id or 'cpu' + if gpu_count > 1: + commands.extend(f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split()) - commands += [ + commands.extend([ 'train.py', '--epochs', str(epochs), '--batch-size', - str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', '/out', - '--cfg', f'models/{model}.yaml', '--name', 'models', '--weights', - weights, '--img-size', + str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', project, '--cfg', f'models/{model}.yaml', + '--name', name, '--weights', weights, '--img-size', str(img_size), '--save-period', str(save_period), '--device', device - ] + ]) if gpu_count > 1 and sync_bn: commands.append("--sync-bn") if args_options: - commands += args_options.split() + commands.extend(args_options.split()) logging.info(f'start training: {commands}') subprocess.run(commands, check=True) - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) + monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) # 3. 
convert to onnx and save model weight to design directory opset = cfg.param.opset @@ -118,68 +108,57 @@ def _run_training(cfg: edict) -> None: logging.info(f'export onnx weight: {command}') subprocess.run(command.split(), check=True) - write_ymir_training_result(cfg) + write_ymir_training_result(cfg, map50=0, files=[], id='last') # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: - # generate data.yaml for mining - out_dir = cfg.ymir.output.root_dir - convert_ymir_to_yolov5(cfg) - logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, - p=1.0, - task_idx=task_idx, - task_num=task_num)) + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 - command = 'python3 mining/mining_cald.py' + if gpu_count <= 1: + command = 'python3 mining/mining_cald.py' + else: + port = find_free_port() + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_mining.py' # noqa logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.POSTPROCESS, - p=1.0, - task_idx=task_idx, - task_num=task_num)) + percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: - # generate data.yaml for infer - out_dir = cfg.ymir.output.root_dir - convert_ymir_to_yolov5(cfg) - logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, - p=1.0, - task_idx=task_idx, - task_num=task_num)) - - N = dr.items_count(env.DatasetType.CANDIDATE) - infer_result = dict() - model = YmirYolov5(cfg) - idx = -1 - - monitor_gap = max(1, N // 100) - for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): - img = cv2.imread(asset_path) - result = model.infer(img) - infer_result[asset_path] = result - idx += 1 - - if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, - p=idx / N, - task_idx=task_idx, - task_num=task_num) - monitor.write_monitor_logger(percent=percent) - - rw.write_infer_result(infer_result=infer_result) + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 + + if gpu_count <= 1: + N = dr.items_count(env.DatasetType.CANDIDATE) + infer_result = dict() + model = YmirYolov5(cfg) + idx = -1 + + monitor_gap = max(1, N // 100) + for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): + img = cv2.imread(asset_path) + result = model.infer(img) + infer_result[asset_path] = result + idx += 1 + + if idx % monitor_gap == 0: + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, task_idx=task_idx, task_num=task_num) + monitor.write_monitor_logger(percent=percent) + + rw.write_infer_result(infer_result=infer_result) + else: + port = find_free_port() + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_infer.py' # noqa + + logging.info(f'mining: {command}') + subprocess.run(command.split(), check=True) + monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, - p=1.0, - task_idx=task_idx, - task_num=task_num)) + percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, 
task_idx=task_idx, task_num=task_num)) if __name__ == '__main__': diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index bc7a182..0d208bf 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -39,6 +39,8 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process, write_ymir_training_result + import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model @@ -47,22 +49,16 @@ from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.downloads import attempt_download -from utils.general import (LOGGER, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, - check_version, check_yaml, colorstr, get_latest_run, - increment_path, init_seeds, intersect_dicts, - labels_to_class_weights, labels_to_image_weights, - methods, one_cycle, print_args, print_mutation, - strip_optimizer) +from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, + check_suffix, check_version, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, + one_cycle, print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, - select_device, torch_distributed_zero_first) -from utils.ymir_yolov5 import (YmirStage, get_merged_config, get_ymir_process, - write_ymir_training_result) +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -423,7 +419,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): torch.save(ckpt, w / f'epoch{epoch}.pt') weight_file = str(w / f'epoch{epoch}.pt') - write_ymir_training_result(ymir_cfg, map50=results[2], epoch=epoch, weight_file=weight_file) + write_ymir_training_result(ymir_cfg, map50=results[2], id=str(epoch), files=[weight_file]) del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) @@ -472,7 +468,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear torch.cuda.empty_cache() # save the best and last weight file with other files in models_dir if RANK in [-1, 0]: - write_ymir_training_result(ymir_cfg, map50=best_fitness, epoch=epochs, weight_file='') + write_ymir_training_result(ymir_cfg, map50=best_fitness, id=str(epochs), files=[]) return results diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index 763f66a..ac9a91f 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -10,7 +10,7 @@ shm_size: '32G' export_format: 'ark:raw' model: 'yolov5s' -batch_size: 16 +batch_size_per_gpu: 16 epochs: 300 img_size: 640 opset: 11 diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 7257ed1..0b43505 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ 
b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -5,122 +5,73 @@ import os import os.path as osp import shutil -from enum import IntEnum from typing import Any, List import numpy as np import torch import yaml from easydict import EasyDict as edict -from models.common import DetectMultiBackend -from torch.nn.parallel import DistributedDataParallel as DDP from nptyping import NDArray, Shape, UInt8 from packaging.version import Version -from utils.augmentations import letterbox -from utils.general import check_img_size, non_max_suppression, scale_coords, check_version -from utils.torch_utils import select_device -from ymir_exc import env +from ymir_exc import monitor from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_weight_files, get_ymir_process - -class YmirStage(IntEnum): - PREPROCESS = 1 # convert dataset - TASK = 2 # training/mining/infer - POSTPROCESS = 3 # export model - +from models.common import DetectMultiBackend +from utils.augmentations import letterbox +from utils.general import check_img_size, non_max_suppression, scale_coords +from utils.torch_utils import select_device BBOX = NDArray[Shape['*,4'], Any] CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def get_ymir_process(stage: YmirStage, p: float, task_idx: int = 0, task_num: int = 1) -> float: - """ - stage: pre-process/task/post-process - p: percent for stage - task_idx: index for multiple tasks like mining (task_idx=0) and infer (task_idx=1) - task_num: the total number of multiple tasks. - """ - # const value for ymir process - PREPROCESS_PERCENT = 0.1 - TASK_PERCENT = 0.8 - POSTPROCESS_PERCENT = 0.1 - - if p < 0 or p > 1.0: - raise Exception(f'p not in [0,1], p={p}') - - ratio = 1.0 / task_num - init = task_idx / task_num - - if stage == YmirStage.PREPROCESS: - return init + PREPROCESS_PERCENT * p * ratio - elif stage == YmirStage.TASK: - return init + (PREPROCESS_PERCENT + TASK_PERCENT * p) * ratio - elif stage == YmirStage.POSTPROCESS: - return init + (PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p) * ratio - else: - raise NotImplementedError(f'unknown stage {stage}') - - -def get_merged_config() -> edict: - """ - merge ymir_config and executor_config - """ - merged_cfg = edict() - # the hyperparameter information - merged_cfg.param = env.get_executor_config() - - # the ymir path information - merged_cfg.ymir = env.get_current_env() - return merged_cfg - - def get_weight_file(cfg: edict) -> str: """ return the weight file path by priority find weight file in cfg.param.model_params_path or cfg.param.model_params_path """ - if cfg.ymir.run_training: - model_params_path = cfg.param.get('pretrained_model_params', []) - else: - model_params_path = cfg.param.model_params_path - - model_dir = cfg.ymir.input.models_dir - model_params_path = [ - osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith('.pt') - ] - + weight_files = get_weight_files(cfg, suffix=('.pt')) # choose weight file by priority, best.pt > xxx.pt - for p in model_params_path: + for p in weight_files: if p.endswith('best.pt'): return p - if len(model_params_path) > 0: - return max(model_params_path, key=osp.getctime) + if len(weight_files) > 0: + return max(weight_files, key=osp.getctime) return "" -class YmirYolov5(object): +class YmirYolov5(torch.nn.Module): """ used for mining and inference to init detector and predict. 
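    wraps DetectMultiBackend in a torch.nn.Module, so the DDP mining/infer scripts can move it with .to(device) and call .forward() on it directly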
""" - - def __init__(self, cfg: edict): + def __init__(self, cfg: edict, task='infer'): + super().__init__() self.cfg = cfg if cfg.ymir.run_mining and cfg.ymir.run_infer: # multiple task, run mining first, infer later - infer_task_idx = 1 - task_num = 2 + if task == 'infer': + self.task_idx = 1 + elif task == 'mining': + self.task_idx = 0 + else: + raise Exception(f'unknown task {task}') + + self.task_num = 2 else: - infer_task_idx = 0 - task_num = 1 - - self.task_idx = infer_task_idx - self.task_num = task_num - - device = select_device(cfg.param.get('gpu_id', 'cpu')) - + self.task_idx = 0 + self.task_num = 1 + + self.gpu_id: str = str(cfg.param.get('gpu_id', '0')) + device = select_device(self.gpu_id) + self.gpu_count: int = len(self.gpu_id.split(',')) if self.gpu_id else 0 + self.batch_size_per_gpu = int(cfg.param.get('batch_size_per_gpu', 4)) + self.num_workers_per_gpu = int(cfg.param.get('num_workers_per_gpu', 4)) + self.batch_size: int = self.batch_size_per_gpu * self.gpu_count self.model = self.init_detector(device) + self.model.eval() self.device = device self.class_names = cfg.param.class_names self.stride = self.model.stride @@ -128,36 +79,39 @@ def __init__(self, cfg: edict): self.iou_thres = float(cfg.param.iou_thres) img_size = int(cfg.param.img_size) - imgsz = (img_size, img_size) + imgsz = [img_size, img_size] imgsz = check_img_size(imgsz, s=self.stride) self.model.warmup(imgsz=(1, 3, *imgsz), half=False) # warmup self.img_size = imgsz + def forward(self, x, nms=False): + pred = self.model(x) + if not nms: + return pred + + # postprocess + conf_thres = self.conf_thres + iou_thres = self.iou_thres + classes = None # not filter class_idx in results + agnostic_nms = False + max_det = 100 + + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + return pred + def init_detector(self, device: torch.device) -> DetectMultiBackend: weights = get_weight_file(self.cfg) if not weights: raise Exception("no weights file specified!") - data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') model = DetectMultiBackend( weights=weights, device=device, dnn=False, # not use opencv dnn for onnx inference - data=data_yaml) # dataset.yaml path - - if ddp: - LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html - RANK = int(os.getenv('RANK', -1)) - # WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - cuda = device.type != 'cpu' - if cuda and RANK != -1: - if check_version(torch.__version__, '1.11.0'): - model.model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, - static_graph=True) # type: ignore - else: - model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) # type: ignore + data=None) # dataset.yaml path + return model def predict(self, img: CV_IMAGE) -> NDArray: @@ -175,16 +129,7 @@ def predict(self, img: CV_IMAGE) -> NDArray: img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 img1.unsqueeze_(dim=0) # expand for batch dim - pred = self.model(img1) - - # postprocess - conf_thres = self.conf_thres - iou_thres = self.iou_thres - classes = None # not filter class_idx in results - agnostic_nms = False - max_det = 1000 - - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + pred = self.forward(img1, nms=True) result = [] for det in pred: @@ -216,6 +161,10 @@ def infer(self, img: CV_IMAGE) -> List[rw.Annotation]: return anns + def write_monitor_logger(self, stage: YmirStage, p: float): + monitor.write_monitor_logger( + 
percent=get_ymir_process(stage=stage, p=p, task_idx=self.task_idx, task_num=self.task_num)) + def convert_ymir_to_yolov5(cfg: edict): """ From 946a04fc79de59e65303d67a5456aa015ff4e77c Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 25 Aug 2022 19:08:16 +0800 Subject: [PATCH 101/204] change mining result save directory --- det-yolov5-tmi/mining/ymir_mining.py | 6 +++--- det-yolov5-tmi/start.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py index e58264b..677f2ee 100644 --- a/det-yolov5-tmi/mining/ymir_mining.py +++ b/det-yolov5-tmi/mining/ymir_mining.py @@ -146,7 +146,7 @@ def run(ymir_cfg, ymir_yolov5): image_file = batch['image_file'][inner_idx] mining_results[image_file] = batch_consistency[inner_idx] - torch.save(mining_results, f'mining_results_{RANK}.pt') + torch.save(mining_results, f'/out/mining_results_{RANK}.pt') def main(): @@ -167,9 +167,9 @@ def main(): if RANK in [0, -1]: results = [] for rank in range(WORLD_SIZE): - results.append(torch.load(f'mining_results_{rank}.pt')) + results.append(torch.load(f'/out/mining_results_{rank}.pt')) - torch.save(results, 'mining_results_all_rank.pt') + torch.save(results, '/out/mining_results_all_rank.pt') ymir_mining_result = [] for result in results: diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 6b75b55..858bf0c 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -154,7 +154,7 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: port = find_free_port() command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_infer.py' # noqa - logging.info(f'mining: {command}') + logging.info(f'infer: {command}') subprocess.run(command.split(), check=True) monitor.write_monitor_logger( From 3242af355f22444c377e5f5dfb1b6a7d9b047070 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 26 Aug 2022 15:01:10 +0800 Subject: [PATCH 102/204] use data.yaml for onnx model --- det-yolov5-tmi/infer-template.yaml | 2 +- det-yolov5-tmi/mining-template.yaml | 2 +- det-yolov5-tmi/mining/ymir_infer.py | 17 +++++++++------ det-yolov5-tmi/mining/ymir_mining.py | 24 +++++++++++---------- det-yolov5-tmi/start.py | 19 +++++++++++++++++ det-yolov5-tmi/utils/ymir_yolov5.py | 32 ++++++++++++++-------------- 6 files changed, 60 insertions(+), 36 deletions(-) diff --git a/det-yolov5-tmi/infer-template.yaml b/det-yolov5-tmi/infer-template.yaml index 008375c..329887a 100644 --- a/det-yolov5-tmi/infer-template.yaml +++ b/det-yolov5-tmi/infer-template.yaml @@ -12,4 +12,4 @@ conf_thres: 0.25 iou_thres: 0.45 batch_size_per_gpu: 16 num_workers_per_gpu: 4 -ddp: False +pin_memory: False diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml index 78e13e7..1ae6d29 100644 --- a/det-yolov5-tmi/mining-template.yaml +++ b/det-yolov5-tmi/mining-template.yaml @@ -12,4 +12,4 @@ conf_thres: 0.25 iou_thres: 0.45 batch_size_per_gpu: 16 num_workers_per_gpu: 4 -ddp: False +pin_memory: False diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index 9f459f0..827dc8a 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -4,12 +4,14 @@ 3. 
merge result """ import os +import sys import warnings from functools import partial import torch import torch.distributed as dist import torch.utils.data as td +from easydict import EasyDict as edict from tqdm import tqdm from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config @@ -23,7 +25,7 @@ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def run(ymir_cfg, ymir_yolov5): +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) device = torch.device('cuda', gpu) @@ -32,8 +34,9 @@ def run(ymir_cfg, ymir_yolov5): load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) batch_size_per_gpu = ymir_yolov5.batch_size_per_gpu gpu_count = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 num_workers_per_gpu = min([ - os.cpu_count() // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, ymir_yolov5.num_workers_per_gpu ]) @@ -48,7 +51,7 @@ def run(ymir_cfg, ymir_yolov5): shuffle=False, sampler=None, num_workers=num_workers_per_gpu, - pin_memory=False, + pin_memory=ymir_yolov5.pin_memory, drop_last=False) results = [] @@ -76,7 +79,7 @@ def run(ymir_cfg, ymir_yolov5): torch.save(results, f'/out/infer_results_{RANK}.pt') -def main(): +def main() -> int: ymir_cfg = get_merged_config() ymir_yolov5 = YmirYolov5(ymir_cfg, task='infer') @@ -89,6 +92,7 @@ def main(): run(ymir_cfg, ymir_yolov5) + # wait all process to save the infer result dist.barrier() if RANK in [0, -1]: @@ -96,8 +100,6 @@ def main(): for rank in range(WORLD_SIZE): results.append(torch.load(f'/out/infer_results_{rank}.pt')) - torch.save(results, '/out/infer_results_all_rank.pt') - ymir_infer_result = dict() for result in results: for img_data in result: @@ -122,7 +124,8 @@ def main(): print(f'rank: {RANK}, start destroy process group') dist.destroy_process_group() + return 0 if __name__ == '__main__': - main() + sys.exit(main()) diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py index 677f2ee..14fc7aa 100644 --- a/det-yolov5-tmi/mining/ymir_mining.py +++ b/det-yolov5-tmi/mining/ymir_mining.py @@ -6,12 +6,14 @@ 5. merge mining result """ import os +import sys from functools import partial import numpy as np import torch import torch.distributed as dist import torch.utils.data as td +from easydict import EasyDict as edict from tqdm import tqdm from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config @@ -26,17 +28,18 @@ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def run(ymir_cfg, ymir_yolov5): +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. 
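    # LOCAL_RANK/RANK/WORLD_SIZE are exported by the distributed launcher,
    # so each DDP worker selects its own entry from the comma-separated gpu_id list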
gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) device = torch.device('cuda', gpu) ymir_yolov5.to(device) load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) - batch_size_per_gpu = ymir_yolov5.batch_size_per_gpu - gpu_count = ymir_yolov5.gpu_count + batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu + gpu_count: int = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 num_workers_per_gpu = min([ - os.cpu_count() // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, ymir_yolov5.num_workers_per_gpu ]) @@ -51,7 +54,7 @@ def run(ymir_cfg, ymir_yolov5): shuffle=False, sampler=None, num_workers=num_workers_per_gpu, - pin_memory=False, + pin_memory=ymir_yolov5.pin_memory, drop_last=False) results = [] @@ -63,7 +66,7 @@ def run(ymir_cfg, ymir_yolov5): with torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx / dataset_size) + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) preprocess_image_shape = batch['image'].shape[2:] for inner_idx, det in enumerate(pred): # per image result_per_image = [] @@ -88,7 +91,7 @@ def run(ymir_cfg, ymir_yolov5): sampler=None, collate_fn=collate_fn_with_fake_ann, num_workers=num_workers_per_gpu, - pin_memory=False, + pin_memory=ymir_yolov5.pin_memory, drop_last=False) dataset_size = len(results) @@ -149,7 +152,7 @@ def run(ymir_cfg, ymir_yolov5): torch.save(mining_results, f'/out/mining_results_{RANK}.pt') -def main(): +def main() -> int: ymir_cfg = get_merged_config() ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') @@ -169,8 +172,6 @@ def main(): for rank in range(WORLD_SIZE): results.append(torch.load(f'/out/mining_results_{rank}.pt')) - torch.save(results, '/out/mining_results_all_rank.pt') - ymir_mining_result = [] for result in results: for img_file, score in result.items(): @@ -179,7 +180,8 @@ def main(): print(f'rank: {RANK}, start destroy process group') dist.destroy_process_group() + return 0 if __name__ == '__main__': - main() + sys.exit(main()) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 858bf0c..9e2dfa1 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -114,6 +114,15 @@ def _run_training(cfg: edict) -> None: def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: + # generate data.yaml for mining + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, + p=1.0, + task_idx=task_idx, + task_num=task_num)) gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 @@ -129,6 +138,16 @@ def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: + # generate data.yaml for infer + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, + p=1.0, + task_idx=task_idx, + task_num=task_num)) + gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py 
b/det-yolov5-tmi/utils/ymir_yolov5.py index 0b43505..4093100 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -15,7 +15,7 @@ from packaging.version import Version from ymir_exc import monitor from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_weight_files, get_ymir_process +from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process from models.common import DetectMultiBackend from utils.augmentations import letterbox @@ -67,37 +67,36 @@ def __init__(self, cfg: edict, task='infer'): self.gpu_id: str = str(cfg.param.get('gpu_id', '0')) device = select_device(self.gpu_id) self.gpu_count: int = len(self.gpu_id.split(',')) if self.gpu_id else 0 - self.batch_size_per_gpu = int(cfg.param.get('batch_size_per_gpu', 4)) - self.num_workers_per_gpu = int(cfg.param.get('num_workers_per_gpu', 4)) + self.batch_size_per_gpu: int = int(cfg.param.get('batch_size_per_gpu', 4)) + self.num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) + self.pin_memory: bool = get_bool(cfg, 'pin_memory', False) self.batch_size: int = self.batch_size_per_gpu * self.gpu_count self.model = self.init_detector(device) self.model.eval() self.device = device - self.class_names = cfg.param.class_names + self.class_names: List[str] = cfg.param.class_names self.stride = self.model.stride - self.conf_thres = float(cfg.param.conf_thres) - self.iou_thres = float(cfg.param.iou_thres) + self.conf_thres: float = float(cfg.param.conf_thres) + self.iou_thres: float = float(cfg.param.iou_thres) img_size = int(cfg.param.img_size) imgsz = [img_size, img_size] imgsz = check_img_size(imgsz, s=self.stride) self.model.warmup(imgsz=(1, 3, *imgsz), half=False) # warmup - self.img_size = imgsz + self.img_size: List[int] = imgsz def forward(self, x, nms=False): pred = self.model(x) if not nms: return pred - # postprocess - conf_thres = self.conf_thres - iou_thres = self.iou_thres - classes = None # not filter class_idx in results - agnostic_nms = False - max_det = 100 - - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + pred = non_max_suppression(pred, + conf_thres=self.conf_thres, + iou_thres=self.iou_thres, + classes=None, # not filter class_idx + agnostic=False, + max_det=100) return pred def init_detector(self, device: torch.device) -> DetectMultiBackend: @@ -106,11 +105,12 @@ def init_detector(self, device: torch.device) -> DetectMultiBackend: if not weights: raise Exception("no weights file specified!") + data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') model = DetectMultiBackend( weights=weights, device=device, dnn=False, # not use opencv dnn for onnx inference - data=None) # dataset.yaml path + data=data_yaml) # dataset.yaml path return model From 8573696b5eefb781222d124a6e3033712d1cc951 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 26 Aug 2022 17:51:15 +0800 Subject: [PATCH 103/204] fix mining bug for multi-gpu --- det-yolov5-tmi/mining/ymir_mining.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py index 14fc7aa..917ea44 100644 --- a/det-yolov5-tmi/mining/ymir_mining.py +++ b/det-yolov5-tmi/mining/ymir_mining.py @@ -70,9 +70,9 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): preprocess_image_shape = batch['image'].shape[2:] for inner_idx, det in enumerate(pred): # per image result_per_image = [] + image_file = batch['image_file'][inner_idx] if len(det): 
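                # det is one (n, 6) tensor per image: [x1, y1, x2, y2, conf, cls]
                # in the letterboxed input space; scale_coords below maps the boxes
                # back to the original image resolution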
                origin_image_shape = (batch['origin_shape'][0][inner_idx], batch['origin_shape'][1][inner_idx])
-                image_file = batch['image_file'][inner_idx]
                 # Rescale boxes from img_size to img size
                 det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round()
             result_per_image.append(det)

From 01989aeec64b8bc2ac4cd54f8a2991bda21a4554 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Tue, 30 Aug 2022 17:07:39 +0800
Subject: [PATCH 104/204] support multi-gpu mining

---
 det-mmdetection-tmi/mmdet/utils/util_ymir.py |  6 +-
 det-mmdetection-tmi/start.py                 | 17 +++-
 det-mmdetection-tmi/training-template.yaml   |  4 +-
 det-mmdetection-tmi/ymir_infer.py            | 71 ++++++--------
 det-mmdetection-tmi/ymir_mining.py           | 97 +++++++++++++-------
 det-yolov5-tmi/mining/ymir_mining.py         |  5 +-
 6 files changed, 115 insertions(+), 85 deletions(-)

diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py
index 810914b..982a1d4 100644
--- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py
+++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py
@@ -132,7 +132,7 @@ def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config:
         mmdet_cfg.log_config['hooks'][1].update(tensorboard_logger)

     # modify evaluation and interval
-    interval = max(1, mmdet_cfg.runner.max_epochs//30)
+    interval = max(1, mmdet_cfg.runner.max_epochs // 30)
     mmdet_cfg.evaluation.interval = interval
     mmdet_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox')
     # TODO Whether to evaluating the AP for each class
@@ -146,9 +146,9 @@ def get_weight_file(cfg: edict) -> str:
     find weight file in cfg.param.pretrained_model_params or cfg.param.model_params_path
     """
     if cfg.ymir.run_training:
-        model_params_path: List = cfg.param.get('pretrained_model_params', [])
+        model_params_path: List[str] = cfg.param.get('pretrained_model_params', [])
     else:
-        model_params_path: List = cfg.param.get('model_params_path', [])
+        model_params_path = cfg.param.get('model_params_path', [])

     model_dir = cfg.ymir.input.models_dir
     model_params_path = [
diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py
index 12a6f9c..b570b2d 100644
--- a/det-mmdetection-tmi/start.py
+++ b/det-mmdetection-tmi/start.py
@@ -4,9 +4,8 @@ import sys

 from easydict import EasyDict as edict
-
-from mmdet.utils.util_ymir import get_merged_config
 from ymir_exc import monitor
+from ymir_exc.util import find_free_port, get_merged_config


 def start(cfg: edict) -> int:
@@ -16,7 +15,7 @@ def start(cfg: edict) -> int:
         _run_training()
     elif cfg.ymir.run_mining or cfg.ymir.run_infer:
         if cfg.ymir.run_mining:
-            _run_mining()
+            _run_mining(cfg)
         if cfg.ymir.run_infer:
             _run_infer()
     else:
@@ -35,12 +34,20 @@ def _run_training() -> None:
     logging.info("training finished")


-def _run_mining() -> None:
-    command = 'python3 ymir_mining.py'
+def _run_mining(cfg: edict) -> None:
+    gpu_id: str = str(cfg.param.get('gpu_id', '0'))
+    gpu_count = len(gpu_id.split(','))
+    if gpu_count <= 1:
+        command = 'python3 ymir_mining.py'
+    else:
+        port = find_free_port()
+        command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} ymir_mining.py' # noqa
+
     logging.info(f'start mining: {command}')
     subprocess.run(command.split(), check=True)
     logging.info("mining finished")

+
 def _run_infer() -> None:
     command = 'python3 ymir_infer.py'
     logging.info(f'start infer: {command}')
diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml
index 37b2da9..d4c191f 100644
--- 
a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -1,7 +1,7 @@ shm_size: '32G' export_format: 'ark:raw' -samples_per_gpu: 2 -workers_per_gpu: 2 +samples_per_gpu: 16 +workers_per_gpu: 16 max_epochs: 300 config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' args_options: '' diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 661b2ea..9920ca2 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -8,12 +8,10 @@ import numpy as np from easydict import EasyDict as edict from mmcv import DictAction +from mmdet.apis import inference_detector, init_detector +from mmdet.utils.util_ymir import YmirStage, get_merged_config, get_weight_file, get_ymir_process from nptyping import NDArray, Shape from tqdm import tqdm - -from mmdet.apis import inference_detector, init_detector -from mmdet.utils.util_ymir import (YmirStage, get_merged_config, - get_weight_file, get_ymir_process) from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw @@ -23,58 +21,51 @@ def parse_option(cfg_options: str) -> dict: parser = argparse.ArgumentParser(description='parse cfg options') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') + parser.add_argument('--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') args = parser.parse_args(f'--cfg-options {cfg_options}'.split()) return args.cfg_options -def mmdet_result_to_ymir(results: List[DETECTION_RESULT], - class_names: List[str]) -> List[rw.Annotation]: +def mmdet_result_to_ymir(results: List[DETECTION_RESULT], class_names: List[str]) -> List[rw.Annotation]: ann_list = [] for idx, result in enumerate(results): for line in result: if any(np.isinf(line)): continue x1, y1, x2, y2, score = line - ann = rw.Annotation( - class_name=class_names[idx], - score=score, - box=rw.Box(x=round(x1), - y=round(y1), - w=round(x2-x1), - h=round(y2-y1)) - ) + ann = rw.Annotation(class_name=class_names[idx], + score=score, + box=rw.Box(x=round(x1), y=round(y1), w=round(x2 - x1), h=round(y2 - y1))) ann_list.append(ann) return ann_list def get_config_file(cfg): if cfg.ymir.run_training: - model_params_path: List = cfg.param.get('pretrained_model_params',[]) + model_params_path: List = cfg.param.get('pretrained_model_params', []) else: - model_params_path: List = cfg.param.get('model_params_path',[]) + model_params_path: List = cfg.param.get('model_params_path', []) model_dir = cfg.ymir.input.models_dir config_files = [ - osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.py'))] + osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.py')) + ] if len(config_files) > 0: if len(config_files) > 1: warnings.warn(f'multiple config file found! use {config_files[0]}') return config_files[0] else: - raise Exception( - f'no config_file found in {model_dir} and {model_params_path}') + raise Exception(f'no config_file found in {model_dir} and {model_params_path}') class YmirModel: @@ -90,8 +81,8 @@ def __init__(self, cfg: edict): infer_task_idx = 0 task_num = 1 - self.task_idx=infer_task_idx - self.task_num=task_num + self.task_idx = infer_task_idx + self.task_num = task_num # Specify the path to model config and checkpoint file config_file = get_config_file(cfg) @@ -103,8 +94,7 @@ def __init__(self, cfg: edict): gpu_ids = cfg.param.get('gpu_id', '0') gpu_id = gpu_ids.split(',')[0] # build the model from a config file and a checkpoint file - self.model = init_detector( - config_file, checkpoint_file, device=f'cuda:{gpu_id}', cfg_options=cfg_options) + self.model = init_detector(config_file, checkpoint_file, device=f'cuda:{gpu_id}', cfg_options=cfg_options) def infer(self, img): return inference_detector(self.model, img) @@ -124,21 +114,20 @@ def main(): for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): img = cv2.imread(asset_path) result = model.infer(img) - raw_anns = mmdet_result_to_ymir( - result, cfg.param.class_names) + raw_anns = mmdet_result_to_ymir(result, cfg.param.class_names) - infer_result[asset_path] = [ - ann for ann in raw_anns if ann.score >= conf_threshold] + infer_result[asset_path] = [ann for ann in raw_anns if ann.score >= conf_threshold] idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process( - stage=YmirStage.TASK, p=idx / N, task_idx=model.task_idx, task_num=model.task_num) + percent = get_ymir_process(stage=YmirStage.TASK, + p=idx / N, + task_idx=model.task_idx, + task_num=model.task_num) monitor.write_monitor_logger(percent=percent) rw.write_infer_result(infer_result=infer_result) - percent = get_ymir_process(stage=YmirStage.POSTPROCESS, - p=1, task_idx=model.task_idx, task_num=model.task_num) + 
percent = get_ymir_process(stage=YmirStage.POSTPROCESS, p=1, task_idx=model.task_idx, task_num=model.task_num) monitor.write_monitor_logger(percent=percent) return 0 diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining.py index 0299edc..7eeaa1f 100644 --- a/det-mmdetection-tmi/ymir_mining.py +++ b/det-mmdetection-tmi/ymir_mining.py @@ -2,24 +2,31 @@ data augmentations for CALD method, including horizontal_flip, rotate(5'), cutout official code: https://github.com/we1pingyu/CALD/blob/master/cald/cald_helper.py """ +import os import random import sys -from typing import Any, Dict, List, Tuple +from typing import Any, Callable, Dict, List, Tuple import cv2 -from easydict import EasyDict as edict import numpy as np +import torch +import torch.distributed as dist +from easydict import EasyDict as edict +from mmcv.runner import init_dist +from mmdet.apis.test import collect_results_gpu +from mmdet.utils.util_ymir import BBOX, CV_IMAGE from nptyping import NDArray from scipy.stats import entropy from tqdm import tqdm - -from mmdet.utils.util_ymir import (BBOX, CV_IMAGE, YmirStage, - get_merged_config, get_ymir_process) -from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor +from ymir_exc import monitor from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process from ymir_infer import YmirModel +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray: ''' @@ -32,11 +39,13 @@ def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray: ''' n1 = boxes1.shape[0] n2 = boxes2.shape[0] - max_xy = np.minimum(np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1), - np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0)) + max_xy = np.minimum( + np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1), + np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0)) - min_xy = np.maximum(np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1), - np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0)) + min_xy = np.maximum( + np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1), + np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0)) inter = np.clip(max_xy - min_xy, a_min=0, a_max=None) # (n1, n2, 2) return inter[:, :, 0] * inter[:, :, 1] # (n1, n2) @@ -59,8 +68,12 @@ def horizontal_flip(image: CV_IMAGE, bbox: BBOX) \ return image, bbox -def cutout(image: CV_IMAGE, bbox: BBOX, cut_num: int = 2, fill_val: int = 0, - bbox_remove_thres: float = 0.4, bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]: +def cutout(image: CV_IMAGE, + bbox: BBOX, + cut_num: int = 2, + fill_val: int = 0, + bbox_remove_thres: float = 0.4, + bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]: ''' Cutout augmentation image: A PIL image @@ -89,8 +102,7 @@ def cutout(image: CV_IMAGE, bbox: BBOX, cut_num: int = 2, fill_val: int = 0, right = left + cutout_size_w top = random.uniform(0, original_h - cutout_size_h) bottom = top + cutout_size_h - cutout = np.array( - [[float(left), float(top), float(right), float(bottom)]]) + cutout = np.array([[float(left), float(top), float(right), float(bottom)]]) # Calculate intersect between cutout and bounding boxes overlap_size = intersect(cutout, bbox) @@ -162,7 +174,7 @@ def get_affine_transform(center: NDArray, dst_h = output_size[1] rot_rad = np.pi * rot / 180 - src_dir = get_dir([0, src_w * -0.5], rot_rad) + 
src_dir = get_dir(np.array([0, src_w * -0.5], np.float32), rot_rad) dst_dir = np.array([0, dst_w * -0.5], np.float32) src = np.zeros((3, 2), dtype=np.float32) @@ -253,12 +265,24 @@ def __init__(self, cfg: edict): self.task_num = task_num def mining(self): - N = dr.items_count(env.DatasetType.CANDIDATE) + with open(self.cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + if RANK == -1: + N = len(images) + tbar = tqdm(images) + else: + images_rank = images[RANK::WORLD_SIZE] + N = len(images_rank) + if RANK == 0: + tbar = tqdm(images_rank) + else: + tbar = images_rank + monitor_gap = max(1, N // 100) idx = -1 beta = 1.3 mining_result = [] - for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): + for asset_path in tbar: img = cv2.imread(asset_path) # xyxy,conf,cls result = self.predict(img) @@ -296,10 +320,8 @@ def mining(self): consistency_box = max_iou consistency_cls = 0.5 * \ (conf[origin_idx] + conf_key[aug_idx]) * (1 - js) - consistency_per_inst = abs( - consistency_box + consistency_cls - beta) - consistency_per_aug = min( - consistency_per_aug, consistency_per_inst.item()) + consistency_per_inst = abs(consistency_box + consistency_cls - beta) + consistency_per_aug = min(consistency_per_aug, consistency_per_inst.item()) consistency += consistency_per_aug @@ -309,10 +331,15 @@ def mining(self): idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process( - stage=YmirStage.TASK, p=idx / N, task_idx=self.task_idx, task_num=self.task_num) + percent = get_ymir_process(stage=YmirStage.TASK, + p=idx / N, + task_idx=self.task_idx, + task_num=self.task_num) monitor.write_monitor_logger(percent=percent) + if RANK != -1: + mining_result = collect_results_gpu(mining_result, len(images)) + return mining_result def predict(self, img: CV_IMAGE) -> NDArray: @@ -342,10 +369,7 @@ def aug_predict(self, image: CV_IMAGE, bboxes: BBOX) -> Tuple[Dict[str, BBOX], D return the predict result and augment bbox. 
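        the four CALD augmentations applied below are flip, cutout, rotate and resize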
""" - aug_dict = dict(flip=horizontal_flip, - cutout=cutout, - rotate=rotate, - resize=resize) + aug_dict: Dict[str, Callable] = dict(flip=horizontal_flip, cutout=cutout, rotate=rotate, resize=resize) aug_bboxes = dict() aug_results = dict() @@ -360,14 +384,23 @@ def aug_predict(self, image: CV_IMAGE, bboxes: BBOX) -> Tuple[Dict[str, BBOX], D def main(): + if LOCAL_RANK != -1: + init_dist(launcher='pytorch', backend="nccl" if dist.is_nccl_available() else "gloo") + cfg = get_merged_config() miner = YmirMining(cfg) + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu = int(gpu_id.split(',')[LOCAL_RANK]) + device = torch.device('cuda', gpu) + miner.model.to(device) mining_result = miner.mining() - rw.write_mining_result(mining_result=mining_result) - percent = get_ymir_process(stage=YmirStage.POSTPROCESS, - p=1, task_idx=miner.task_idx, task_num=miner.task_num) - monitor.write_monitor_logger(percent=percent) + if RANK in [0, -1]: + rw.write_mining_result(mining_result=mining_result) + + percent = get_ymir_process(stage=YmirStage.POSTPROCESS, p=1, task_idx=miner.task_idx, task_num=miner.task_num) + monitor.write_monitor_logger(percent=percent) + return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py index 917ea44..e8a6c59 100644 --- a/det-yolov5-tmi/mining/ymir_mining.py +++ b/det-yolov5-tmi/mining/ymir_mining.py @@ -66,7 +66,8 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): with torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) preprocess_image_shape = batch['image'].shape[2:] for inner_idx, det in enumerate(pred): # per image result_per_image = [] @@ -98,7 +99,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader for idx, batch in enumerate(pbar): - if idx % monitor_gap == 0: + if idx % monitor_gap == 0 and RANK in [-1, 0]: ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) batch_consistency = [0.0 for _ in range(len(batch['image_file']))] From 45d57c792596ca68ee47b61b816d1896ddabba67 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 31 Aug 2022 09:32:02 +0800 Subject: [PATCH 105/204] update mmdetection to ymir-executor-sdk 1.1.0 --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 130 ++++++------------- det-mmdetection-tmi/ymir_train.py | 5 +- 2 files changed, 45 insertions(+), 90 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 982a1d4..3f28149 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -5,7 +5,6 @@ import logging import os import os.path as osp -from enum import IntEnum from typing import Any, List, Optional import mmcv @@ -14,60 +13,12 @@ from mmcv import Config from nptyping import NDArray, Shape, UInt8 from packaging.version import Version -from ymir_exc import env from ymir_exc import result_writer as rw - -class YmirStage(IntEnum): - PREPROCESS = 1 # convert dataset - TASK = 2 # training/mining/infer - POSTPROCESS = 3 # export model - - BBOX = NDArray[Shape['*,4'], Any] CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def get_ymir_process(stage: YmirStage, p: 
float, task_idx: int = 0, task_num: int = 1) -> float: - """ - stage: pre-process/task/post-process - p: percent for stage - task_idx: index for multiple tasks like mining (task_idx=0) and infer (task_idx=1) - task_num: the total number of multiple tasks. - """ - # const value for ymir process - PREPROCESS_PERCENT = 0.1 - TASK_PERCENT = 0.8 - POSTPROCESS_PERCENT = 0.1 - - if p < 0 or p > 1.0: - raise Exception(f'p not in [0,1], p={p}') - - init = task_idx * 1.0 / task_num - ratio = 1.0 / task_num - if stage == YmirStage.PREPROCESS: - return init + PREPROCESS_PERCENT * p * ratio - elif stage == YmirStage.TASK: - return init + (PREPROCESS_PERCENT + TASK_PERCENT * p) * ratio - elif stage == YmirStage.POSTPROCESS: - return init + (PREPROCESS_PERCENT + TASK_PERCENT + POSTPROCESS_PERCENT * p) * ratio - else: - raise NotImplementedError(f'unknown stage {stage}') - - -def get_merged_config() -> edict: - """ - merge ymir_config and executor_config - """ - merged_cfg = edict() - # the hyperparameter information - merged_cfg.param = env.get_executor_config() - - # the ymir path information - merged_cfg.ymir = env.get_current_env() - return merged_cfg - - def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: """ useful for training process @@ -76,11 +27,9 @@ def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: - modify epochs, checkpoint, tensorboard config """ # modify dataset config - ymir_ann_files = dict( - train=ymir_cfg.ymir.input.training_index_file, - val=ymir_cfg.ymir.input.val_index_file, - test=ymir_cfg.ymir.input.candidate_index_file - ) + ymir_ann_files = dict(train=ymir_cfg.ymir.input.training_index_file, + val=ymir_cfg.ymir.input.val_index_file, + test=ymir_cfg.ymir.input.candidate_index_file) # validation may augment the image and use more gpu # so set smaller samples_per_gpu for validation @@ -96,8 +45,7 @@ def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: ann_prefix=ymir_cfg.ymir.input.annotations_dir, classes=ymir_cfg.param.class_names, data_root=ymir_cfg.ymir.input.root_dir, - filter_empty_gt=False - ) + filter_empty_gt=False) # modify dataset config for `split` mmdet_dataset_cfg = mmdet_cfg.data.get(split, None) if mmdet_dataset_cfg is None: @@ -113,8 +61,7 @@ def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: elif src_dataset_type in ['MultiImageMixDataset', 'RepeatDataset']: mmdet_dataset_cfg.dataset.update(ymir_dataset_cfg) else: - raise Exception( - f'unsupported source dataset type {src_dataset_type}') + raise Exception(f'unsupported source dataset type {src_dataset_type}') # modify model output channel mmdet_model_cfg = mmdet_cfg.model.bbox_head @@ -124,8 +71,7 @@ def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: if ymir_cfg.param.get('max_epochs', None): mmdet_cfg.runner.max_epochs = ymir_cfg.param.max_epochs mmdet_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir - tensorboard_logger = dict(type='TensorboardLoggerHook', - log_dir=ymir_cfg.ymir.output.tensorboard_dir) + tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir=ymir_cfg.ymir.output.tensorboard_dir) if len(mmdet_cfg.log_config['hooks']) <= 1: mmdet_cfg.log_config['hooks'].append(tensorboard_logger) else: @@ -144,6 +90,7 @@ def get_weight_file(cfg: edict) -> str: """ return the weight file path by priority find weight file in cfg.param.pretrained_model_params or cfg.param.model_params_path + load coco-pretrained weight for yolox """ if cfg.ymir.run_training: model_params_path: List[str] 
= cfg.param.get('pretrained_model_params', []) @@ -152,19 +99,38 @@ def get_weight_file(cfg: edict) -> str: model_dir = cfg.ymir.input.models_dir model_params_path = [ - osp.join(model_dir, p) for p in model_params_path if osp.exists(osp.join(model_dir, p)) and p.endswith(('.pth', '.pt'))] + osp.join(model_dir, p) for p in model_params_path + if osp.exists(osp.join(model_dir, p)) and p.endswith(('.pth', '.pt')) + ] # choose weight file by priority, best_xxx.pth > latest.pth > epoch_xxx.pth - best_pth_files = [ - f for f in model_params_path if osp.basename(f).startswith('best_')] + best_pth_files = [f for f in model_params_path if osp.basename(f).startswith('best_')] if len(best_pth_files) > 0: return max(best_pth_files, key=os.path.getctime) - epoch_pth_files = [ - f for f in model_params_path if osp.basename(f).startswith(('epoch_', 'iter_'))] + epoch_pth_files = [f for f in model_params_path if osp.basename(f).startswith(('epoch_', 'iter_'))] if len(epoch_pth_files) > 0: return max(epoch_pth_files, key=os.path.getctime) + if cfg.ymir.run_training: + weight_files = [f for f in glob.glob('/weights/**/*', recursive=True) if f.endswith(('.pth', '.pt'))] + + model_name_splits = osp.basename(cfg.param.config_file).split('_') + if len(weight_files) > 0 and model_name_splits[0] == 'yolox': + yolox_weight_files = [ + f for f in weight_files if osp.basename(f).startswith(f'yolox_{model_name_splits[1]}') + ] + + if len(yolox_weight_files) == 0: + if model_name_splits[1] == 'nano': + # yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth or yolox_tiny.py + yolox_weight_files = [f for f in weight_files if osp.basename(f).startswith('yolox_tiny')] + elif model_name_splits[1] == 'm': + yolox_weight_files = [f for f in weight_files if osp.basename(f).startswith('yolox_l')] + + if len(yolox_weight_files) > 0: + logging.info(f'load yolox pretrained weight {yolox_weight_files[0]}') + return yolox_weight_files[0] return "" @@ -181,8 +147,7 @@ def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[f logging.info(f'key_score is {key_score}') COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') if COCO_EVAL_TMP_FILE is None: - raise Exception( - 'please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') + raise Exception('please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') eval_result = mmcv.load(COCO_EVAL_TMP_FILE) # eval_result may be empty dict {}. 
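    # e.g. {'bbox_mAP': 0.50, 'bbox_mAP_50': 0.70, ...} once the mmdet eval hook
    # has run at least once (illustrative values, not from a real run)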
@@ -190,26 +155,22 @@ def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[f WORK_DIR = os.getenv('YMIR_MODELS_DIR') if WORK_DIR is None or not osp.isdir(WORK_DIR): - raise Exception( - f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') + raise Exception(f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') # assert only one model config file in work_dir - result_files = [osp.basename(f) for f in glob.glob( - osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] + result_files = [osp.basename(f) for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] if last: # save all output file - rw.write_model_stage(files=result_files, - mAP=float(map), - stage_name='last') + rw.write_model_stage(files=result_files, mAP=float(map), stage_name='last') else: # save newest weight file in format epoch_xxx.pth or iter_xxx.pth - weight_files = [osp.join(WORK_DIR, f) for f in result_files if f.startswith( - ('iter_', 'epoch_')) and f.endswith('.pth')] + weight_files = [ + osp.join(WORK_DIR, f) for f in result_files if f.startswith(('iter_', 'epoch_')) and f.endswith('.pth') + ] if len(weight_files) > 0: - newest_weight_file = osp.basename( - max(weight_files, key=os.path.getctime)) + newest_weight_file = osp.basename(max(weight_files, key=os.path.getctime)) stage_name = osp.splitext(newest_weight_file)[0] training_result_file = osp.join(WORK_DIR, 'result.yaml') @@ -222,9 +183,7 @@ def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[f if stage_name not in model_stages: config_files = [f for f in result_files if f.endswith('.py')] - rw.write_model_stage(files=[newest_weight_file] + config_files, - mAP=float(map), - stage_name=stage_name) + rw.write_model_stage(files=[newest_weight_file] + config_files, mAP=float(map), stage_name=stage_name) def _write_ancient_ymir_training_result(key_score: Optional[float] = None): @@ -233,8 +192,7 @@ def _write_ancient_ymir_training_result(key_score: Optional[float] = None): COCO_EVAL_TMP_FILE = os.getenv('COCO_EVAL_TMP_FILE') if COCO_EVAL_TMP_FILE is None: - raise Exception( - 'please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') + raise Exception('please set valid environment variable COCO_EVAL_TMP_FILE to write result into json file') eval_result = mmcv.load(COCO_EVAL_TMP_FILE) # eval_result may be empty dict {}. 
@@ -242,12 +200,10 @@ def _write_ancient_ymir_training_result(key_score: Optional[float] = None): WORK_DIR = os.getenv('YMIR_MODELS_DIR') if WORK_DIR is None or not osp.isdir(WORK_DIR): - raise Exception( - f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') + raise Exception(f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') # assert only one model config file in work_dir - result_files = [osp.basename(f) for f in glob.glob( - osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] + result_files = [osp.basename(f) for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] training_result_file = osp.join(WORK_DIR, 'result.yaml') if osp.exists(training_result_file): diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 31c2375..552654d 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -5,10 +5,9 @@ import sys from easydict import EasyDict as edict +from mmdet.utils.util_ymir import get_weight_file, write_ymir_training_result from ymir_exc import monitor - -from mmdet.utils.util_ymir import (YmirStage, get_merged_config, get_weight_file, - get_ymir_process, write_ymir_training_result) +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process def main(cfg: edict) -> int: From 5cc425d4854529ceaa531b68f51b80d923239999 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 31 Aug 2022 18:14:37 +0800 Subject: [PATCH 106/204] yzbx to modelai, add vidt --- README.MD | 71 +++++++++++++----- README_zh-CN.MD | 72 ++++++++++++++----- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 2 +- det-mmdetection-tmi/docker/Dockerfile.cuda111 | 11 ++- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 4 +- det-yolov5-tmi/cuda102.dockerfile | 2 +- det-yolov5-tmi/cuda111.dockerfile | 2 +- det-yolov5-tmi/mining/data_augment.py | 1 - det-yolov5-tmi/mining/mining_cald.py | 5 +- 9 files changed, 122 insertions(+), 48 deletions(-) diff --git a/README.MD b/README.MD index 50ce730..e2073d1 100644 --- a/README.MD +++ b/README.MD @@ -2,11 +2,11 @@ - [ymir](https://github.com/IndustryEssentials/ymir) -- [wiki](https://github.com/yzbx/ymir-executor-fork/wiki) +- [wiki](https://github.com/modelai/ymir-executor-fork/wiki) ## ymir-1.1.0 official image -- [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training) +- [yolov4](https://github.com/modelai/ymir-executor-fork#det-yolov4-training) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi @@ -14,7 +14,7 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi ``` -- [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi) +- [yolov5](https://github.com/modelai/ymir-executor-fork#det-yolov5-tmi) - [change log](./det-yolov5-tmi/README.md) @@ -24,7 +24,7 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi ``` -- [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi) +- [mmdetection](https://github.com/modelai/ymir-executor-fork#det-mmdetection-tmi) - [change log](./det-mmdetection-tmi/README.md) @@ -34,35 +34,68 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi ``` -- [detectron2](https://github.com/yzbx/ymir-detectron2) +- [detectron2](https://github.com/modelai/ymir-detectron2) - - [change log](https://github.com/yzbx/ymir-detectron2/blob/master/README.md) + - [change log](https://github.com/modelai/ymir-detectron2/blob/master/README.md) ``` docker pull 
youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi ``` -- [yolov7](https://github.com/yzbx/ymir-yolov7) +- [yolov7](https://github.com/modelai/ymir-yolov7) - - [change log](https://github.com/yzbx/ymir-yolov7/blob/main/ymir/README.md) + - [change log](https://github.com/modelai/ymir-yolov7/blob/main/ymir/README.md) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi ``` -- overview +- [vidt](https://github.com/modelai/ymir-vidt) - | docker image | [finetune](https://github.com/yzbx/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weights | - | - | - | - | - | - | - | - | - | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | - | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | - | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | - | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | - | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | + - [change log](https://github.com/modelai/ymir-vidt/tree/main/ymir) - - online pretrained weights may download through network + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi + ``` + +## overview + +| docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weights | +| - | - | - | - | - | - | - | +| yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | +| yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | +| yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | +| mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | +| detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | +| vidt | ? | ✔️ | ✔️ | ✔️ pytorch | ❌ | online | + +- online pretrained weights may download through network + +- local pretrained weights have copied to docker images when building image + +### benchmark + +- training dataset: voc2012-train 5717 images +- validation dataset: voc2012-val 5823 images +- image size: 640 + +gpu: single Tesla P4 + +| docker image | batch size | epoch number | model | voc2012 val map50 | training time | note | +| - | - | - | - | - | - | - | +| yolov5 | 16 | 100 | yolov5s | 70.05% | 9h | coco-pretrained | +| vidt | 2 | 100 | swin-nano | 54.13% | 2d | imagenet-pretrained | +| yolov4 | 4 | 20000 steps | yolov4 | 66.18% | 2d | imagenet-pretrained | +| yolov7 | 16 | 100 | yolov7-tiny | 70% | 8h | coco-pretrained | + +gpu: single GeForce GTX 1080 Ti - - local pretrained weights have copied to docker images when building image +| docker image | batch size | epoch number | model | voc2012 val map50 | training time | note | +| - | - | - | - | - | - | - | +| yolov5 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained | +| yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | +| mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | +| detectron2 | 2 | 20000 | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | --- @@ -101,7 +134,7 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . 
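To smoke-test a freshly built image outside of the ymir platform, a hypothetical invocation looks like the following; the `/in` config, index files and the `/out` directory are normally prepared by ymir itself, so this only checks that the container starts and finds its entrypoint:

```
docker run --rm --gpus all -v $PWD/in:/in -v $PWD/out:/out ymir-executor/mmdet:cu111-tmi
```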
- [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) -- [ymir-executor-sdk](https://github.com/yzbx/ymir-executor-sdk) +- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ## how to import pretrained model weights diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 83e7f57..443e3f0 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -2,11 +2,11 @@ - [ymir](https://github.com/IndustryEssentials/ymir) -- [wiki](https://github.com/yzbx/ymir-executor-fork/wiki) +- [wiki](https://github.com/modelai/ymir-executor-fork/wiki) ## ymir-1.1.0 官方镜像 -- [yolov4](https://github.com/yzbx/ymir-executor-fork#det-yolov4-training) +- [yolov4](https://github.com/modelai/ymir-executor-fork#det-yolov4-training) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi @@ -14,7 +14,7 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi ``` -- [yolov5](https://github.com/yzbx/ymir-executor-fork#det-yolov5-tmi) +- [yolov5](https://github.com/modelai/ymir-executor-fork#det-yolov5-tmi) - [change log](./det-yolov5-tmi/README.md) @@ -24,7 +24,7 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi ``` -- [mmdetection](https://github.com/yzbx/ymir-executor-fork#det-mmdetection-tmi) +- [mmdetection](https://github.com/modelai/ymir-executor-fork#det-mmdetection-tmi) - [change log](./det-mmdetection-tmi/README.md) @@ -34,35 +34,69 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi ``` -- [detectron2](https://github.com/yzbx/ymir-detectron2) +- [detectron2](https://github.com/modelai/ymir-detectron2) - - [change log](https://github.com/yzbx/ymir-detectron2/blob/master/README.md) + - [change log](https://github.com/modelai/ymir-detectron2/blob/master/README.md) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi ``` -- [yolov7](https://github.com/yzbx/ymir-yolov7) +- [yolov7](https://github.com/modelai/ymir-yolov7) - - [change log](https://github.com/yzbx/ymir-yolov7/blob/main/ymir/README.md) + - [change log](https://github.com/modelai/ymir-yolov7/blob/main/ymir/README.md) ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi ``` -- 比较 +- [vidt](https://github.com/modelai/ymir-vidt) - | docker image | [finetune](https://github.com/yzbx/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weight | - | - | - | - | - | - | - | - | - | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | - | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | - | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | - | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | - | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | + - [change log](https://github.com/modelai/ymir-vidt/tree/main/ymir) - - online 预训练权重可能在训练时通过网络下载 + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi + ``` + +## 比较 + +| docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weight | +| - | - | - | - | - | - | - | +| yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | +| yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | +| yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | +| mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | +| detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | +| vidt | ? 
| ✔️ | ✔️ | ✔️ pytorch | ❌ | online | + +- online 预训练权重可能在训练时通过网络下载 + +- local 预训练权重在构建镜像时复制到了镜像 + +### benchmark + +- training dataset: voc2012-train 5717 images +- validation dataset: voc2012-val 5823 images +- image size: 640 + +gpu: single Tesla P4 + +| docker image | batch size | epoch number | model | voc2012 val map50 | training time | note | +| - | - | - | - | - | - | - | +| yolov5 | 16 | 100 | yolov5s | 70.05% | 9h | coco-pretrained | +| vidt | 2 | 100 | swin-nano | 54.13% | 2d | imagenet-pretrained | +| yolov4 | 4 | 20000 steps | yolov4 | 66.18% | 2d | imagenet-pretrained | +| yolov7 | 16 | 100 | yolov7-tiny | 70% | 8h | coco-pretrained | + +gpu: single GeForce GTX 1080 Ti + +| docker image | batch size | epoch number | model | voc2012 val map50 | training time | note | +| - | - | - | - | - | - | - | +| yolov5 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained | +| yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | +| mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | +| detectron2 | 2 | 20000 | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | - - local 预训练权重在构建镜像时复制到了镜像 --- ## det-yolov4-tmi @@ -114,7 +148,7 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile - [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md) -- [ymir-executor-sdk](https://github.com/yzbx/ymir-executor-sdk) ymir镜像开发辅助库 +- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir镜像开发辅助库 ## 如何导入预训练模型 diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index 517acd0..6d07aa6 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -28,7 +28,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ # Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html \ - && pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" \ + && pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ && conda clean --all # Install det-mmdetection-tmi diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index fbf2508..c811c85 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -26,7 +26,7 @@ RUN apt-get update && apt-get install -y build-essential ffmpeg libsm6 libxext6 # Install ymir-exc sdk and MMCV RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.8.0/index.html \ - && pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" \ + && pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ && conda clean --all # Install det-mmdetection-tmi @@ -37,4 +37,13 @@ RUN pip install --no-cache-dir -r requirements/runtime.txt \ && mv *-template.yaml /img-man \ && echo "cd /app && python3 start.py" > /usr/bin/start.sh +# Download coco-pretrained yolox weight to /weights +# view https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox for detail +# RUN apt-get update && apt install -y wget && rm -rf /var/lib/apt/lists/* +# RUN mkdir -p /weights && cd /weights \ 
+# && wget https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth \ +# && wget https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth \ +# && wget https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth \ +# && wget https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth + CMD bash /usr/bin/start.sh diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 3f28149..8498d9c 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -125,8 +125,8 @@ def get_weight_file(cfg: edict) -> str: if model_name_splits[1] == 'nano': # yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth or yolox_tiny.py yolox_weight_files = [f for f in weight_files if osp.basename(f).startswith('yolox_tiny')] - elif model_name_splits[1] == 'm': - yolox_weight_files = [f for f in weight_files if osp.basename(f).startswith('yolox_l')] + else: + yolox_weight_files = [f for f in weight_files if osp.basename(f).startswith('yolox_s')] if len(yolox_weight_files) > 0: logging.info(f'load yolox pretrained weight {yolox_weight_files[0]}') diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile index e8ab497..0014b60 100644 --- a/det-yolov5-tmi/cuda102.dockerfile +++ b/det-yolov5-tmi/cuda102.dockerfile @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ && rm -rf /var/lib/apt/lists/* # install ymir-exc sdk -RUN pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" +RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" # Copy file from host to docker and install requirements COPY . 
/app diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile index 5d1e165..84427a8 100644 --- a/det-yolov5-tmi/cuda111.dockerfile +++ b/det-yolov5-tmi/cuda111.dockerfile @@ -23,7 +23,7 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ COPY ./requirements.txt /workspace/ # install ymir-exc sdk and requirements -RUN pip install "git+https://github.com/yzbx/ymir-executor-sdk.git@ymir1.0.0" \ +RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ && pip install -r /workspace/requirements.txt # Copy file from host to docker and install requirements diff --git a/det-yolov5-tmi/mining/data_augment.py b/det-yolov5-tmi/mining/data_augment.py index 47b1d50..42af914 100644 --- a/det-yolov5-tmi/mining/data_augment.py +++ b/det-yolov5-tmi/mining/data_augment.py @@ -8,7 +8,6 @@ import cv2 import numpy as np from nptyping import NDArray - from utils.ymir_yolov5 import BBOX, CV_IMAGE diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py index 0e08660..560326c 100644 --- a/det-yolov5-tmi/mining/mining_cald.py +++ b/det-yolov5-tmi/mining/mining_cald.py @@ -8,17 +8,16 @@ import cv2 import numpy as np from easydict import EasyDict as edict +from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate from nptyping import NDArray from scipy.stats import entropy from tqdm import tqdm +from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5 from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process -from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate -from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5 - def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: if len(result) > 0: From a7d65a6d59be59d7950776cdb3529fb147d25d14 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 31 Aug 2022 18:36:00 +0800 Subject: [PATCH 107/204] add minig algorithm reference --- README.MD | 4 ++++ README_zh-CN.MD | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/README.MD b/README.MD index e2073d1..d3cbe03 100644 --- a/README.MD +++ b/README.MD @@ -139,3 +139,7 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . ## how to import pretrained model weights - [import pretainted model weights](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md) + +## reference + +- [mining algorithm: CALD](https://github.com/we1pingyu/CALD/) diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 443e3f0..e0086ac 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -156,6 +156,10 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile - 通过ymir网页端的 `模型管理/模型列表/导入模型` 同样可以导入模型 +## 参考 + +- [挖掘算法CALD](https://github.com/we1pingyu/CALD/) + --- # FAQ From 4d3c8696b1599b5fb1edf128c0664c73a0ac1701 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 31 Aug 2022 18:44:38 +0800 Subject: [PATCH 108/204] add other reference --- README.MD | 5 +++++ README_zh-CN.MD | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/README.MD b/README.MD index d3cbe03..525edf3 100644 --- a/README.MD +++ b/README.MD @@ -143,3 +143,8 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . 
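For readers following the CALD link added in the patch above: the miner scores each unlabeled image by how much the detector's predictions drift when the image is augmented, which is why `mining_cald.py` imports `cutout`, `horizontal_flip`, `resize` and `rotate`. A simplified sketch of the box-consistency half of that score; the real code also folds in a class-score divergence term (fixed in a later patch below), and the variable names here are illustrative:

```python
# Simplified sketch of CALD-style box consistency: predictions that move
# a lot under augmentation suggest the model is unsure about the image.
# `boxes` come from the original image, `boxes_aug` from its augmented
# copy; both are assumed non-empty [x1, y1, x2, y2] numpy arrays.
import numpy as np

def box_iou(a: np.ndarray, b: np.ndarray) -> float:
    lt = np.maximum(a[:2], b[:2])
    rb = np.minimum(a[2:], b[2:])
    wh = np.clip(rb - lt, 0.0, None)
    inter = wh[0] * wh[1]
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return float(inter / (area_a + area_b - inter + 1e-12))

def box_consistency(boxes: np.ndarray, boxes_aug: np.ndarray) -> float:
    # best-match IoU for every augmented box, averaged over the image
    return float(np.mean([max(box_iou(b, o) for o in boxes) for b in boxes_aug]))
```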
## reference - [mining algorithm: CALD](https://github.com/we1pingyu/CALD/) +- [yolov5](https://github.com/ultralytics/yolov5) +- [mmdetection](https://github.com/open-mmlab/mmdetection) +- [yolov7](https://github.com/wongkinyiu/yolov7) +- [detectron2](https://github.com/facebookresearch/detectron2) +- [vidt](https://github.com/naver-ai/vidt) diff --git a/README_zh-CN.MD b/README_zh-CN.MD index e0086ac..89d2283 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -159,6 +159,11 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 参考 - [挖掘算法CALD](https://github.com/we1pingyu/CALD/) +- [yolov5](https://github.com/ultralytics/yolov5) +- [mmdetection](https://github.com/open-mmlab/mmdetection) +- [yolov7](https://github.com/wongkinyiu/yolov7) +- [detectron2](https://github.com/facebookresearch/detectron2) +- [vidt](https://github.com/naver-ai/vidt) --- @@ -166,9 +171,9 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 关于cuda版本 -- 推荐安装11.2以上的cuda版本, 使用11.1及以上的镜像 +- 推荐主机安装11.2以上的cuda版本, 使用11.1及以上的镜像 -- GTX3080/GTX3090系统不支持11.1以下的cuda,只能使用cuda11.1及以上的镜像 +- GTX3080/GTX3090不支持11.1以下的cuda,只能使用cuda11.1及以上的镜像 ## apt 或 pip 安装慢或出错 @@ -247,10 +252,10 @@ tail -f -n 100 ymir_app.log - 挂载目录并运行镜像``,注意需要将ymir部署目录挂载到镜像中 ``` - docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v : bash + docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v : -v /sandbox//training_assset_cache:/in/assets bash - # 以/home/ymir/ymir-workplace作为ymir部署目录为例 - docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace bash + # 以/home/ymir/ymir-workplace作为ymir部署目录为例, 以实际情况为准 + docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v /home/ymir/ymir-workplace/sandbox/0001/training_assset_cache:/in/assets -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace bash ``` - 进入到docker 容器中后, 执行镜像默认的命令, 如dockerfile中写的 `CMD bash /usr/bin/start.sh` From 8a30f5a83fb48cf9862b5cfdee8a5c4eec289ce5 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 31 Aug 2022 18:49:27 +0800 Subject: [PATCH 109/204] update doc --- README.MD | 2 +- README_zh-CN.MD | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.MD b/README.MD index 525edf3..cde68fb 100644 --- a/README.MD +++ b/README.MD @@ -95,7 +95,7 @@ gpu: single GeForce GTX 1080 Ti | yolov5 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained | | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | -| detectron2 | 2 | 20000 | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | +| detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | --- diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 89d2283..fb369f1 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -95,7 +95,7 @@ gpu: single GeForce GTX 1080 Ti | yolov5 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained | | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | -| detectron2 | 2 | 20000 | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | +| detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | --- From 09522d40eda8a927065c1f9a36a845cd3f19dd61 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 31 Aug 2022 18:51:09 +0800 Subject: [PATCH 110/204] zzz --- README.MD | 2 +- README_zh-CN.MD | 2 +- 
2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.MD b/README.MD index cde68fb..4b52b4b 100644 --- a/README.MD +++ b/README.MD @@ -67,7 +67,7 @@ | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | -| vidt | ? | ✔️ | ✔️ | ✔️ pytorch | ❌ | online | +| vidt | ? | ✔️ | ✔️ | pytorch | ❌ | online | - online pretrained weights may download through network diff --git a/README_zh-CN.MD b/README_zh-CN.MD index fb369f1..765b6d6 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -67,7 +67,7 @@ | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | -| vidt | ? | ✔️ | ✔️ | ✔️ pytorch | ❌ | online | +| vidt | ? | ✔️ | ✔️ | pytorch | ❌ | online | - online 预训练权重可能在训练时通过网络下载 From 7544900cb2765fe7dc9aec5ce0ed871a16f31f36 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 2 Sep 2022 11:08:42 +0800 Subject: [PATCH 111/204] update readme --- README.MD | 11 +++++++++++ README_zh-CN.MD | 11 +++++++++++ .../mmdet/core/evaluation/eval_hooks.py | 5 ++--- det-mmdetection-tmi/tools/train.py | 4 ++-- det-mmdetection-tmi/ymir_infer.py | 11 ++++++----- det-mmdetection-tmi/ymir_mining.py | 2 +- det-yolov5-tmi/mining/ymir_infer.py | 7 +++---- det-yolov5-tmi/mining/ymir_mining.py | 7 +++---- 8 files changed, 39 insertions(+), 19 deletions(-) diff --git a/README.MD b/README.MD index 4b52b4b..890bba7 100644 --- a/README.MD +++ b/README.MD @@ -58,6 +58,14 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi ``` +- [nanodet](https://github.com/modelai/ymir-nanodet/tree/ymir-dev) + + - [change log](https://github.com/modelai/ymir-nanodet/tree/ymir-dev/ymir) + + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-nanodet-cu111-tmi + ``` + ## overview | docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weights | @@ -68,6 +76,7 @@ | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | vidt | ? | ✔️ | ✔️ | pytorch | ❌ | online | +| nanodet | ❌ | ✔️ | ❌ | pytorch_lightning | ❌ | online | - online pretrained weights may download through network @@ -143,8 +152,10 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . 
## reference - [mining algorithm: CALD](https://github.com/we1pingyu/CALD/) +- [yolov4](https://github.com/AlexeyAB/darknet) - [yolov5](https://github.com/ultralytics/yolov5) - [mmdetection](https://github.com/open-mmlab/mmdetection) - [yolov7](https://github.com/wongkinyiu/yolov7) - [detectron2](https://github.com/facebookresearch/detectron2) - [vidt](https://github.com/naver-ai/vidt) +- [nanodet](https://github.com/RangiLyu/nanodet) diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 765b6d6..53a25c9 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -58,6 +58,14 @@ docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi ``` +- [nanodet](https://github.com/modelai/ymir-nanodet/tree/ymir-dev) + + - [change log](https://github.com/modelai/ymir-nanodet/tree/ymir-dev/ymir) + + ``` + docker pull youdaoyzbx/ymir-executor:ymir1.1.0-nanodet-cu111-tmi + ``` + ## 比较 | docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weight | @@ -68,6 +76,7 @@ | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | vidt | ? | ✔️ | ✔️ | pytorch | ❌ | online | +| nanodet | ❌ | ✔️ | ❌ | pytorch_lightning | ❌ | online | - online 预训练权重可能在训练时通过网络下载 @@ -159,11 +168,13 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 参考 - [挖掘算法CALD](https://github.com/we1pingyu/CALD/) +- [yolov4](https://github.com/AlexeyAB/darknet) - [yolov5](https://github.com/ultralytics/yolov5) - [mmdetection](https://github.com/open-mmlab/mmdetection) - [yolov7](https://github.com/wongkinyiu/yolov7) - [detectron2](https://github.com/facebookresearch/detectron2) - [vidt](https://github.com/naver-ai/vidt) +- [nanodet](https://github.com/RangiLyu/nanodet) --- diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index 6b10dc1..b2e7dff 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -6,11 +6,10 @@ import torch.distributed as dist from mmcv.runner import DistEvalHook as BaseDistEvalHook from mmcv.runner import EvalHook as BaseEvalHook +from mmdet.utils.util_ymir import write_ymir_training_result from torch.nn.modules.batchnorm import _BatchNorm from ymir_exc import monitor - -from mmdet.utils.util_ymir import (YmirStage, get_ymir_process, - write_ymir_training_result) +from ymir_exc.util import YmirStage, get_ymir_process def _calc_dynamic_intervals(start_interval, dynamic_interval_list): diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index b3b6d65..2ecc642 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -11,13 +11,13 @@ from mmcv import Config, DictAction from mmcv.runner import get_dist_info, init_dist from mmcv.utils import get_git_hash - from mmdet import __version__ from mmdet.apis import init_random_seed, set_random_seed, train_detector from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger, setup_multi_processes -from mmdet.utils.util_ymir import _modify_mmdet_config, get_merged_config +from mmdet.utils.util_ymir import _modify_mmdet_config +from ymir_exc.util import get_merged_config def parse_args(): diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 9920ca2..a7f22bd 100644 --- 
a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -9,14 +9,12 @@ from easydict import EasyDict as edict from mmcv import DictAction from mmdet.apis import inference_detector, init_detector -from mmdet.utils.util_ymir import YmirStage, get_merged_config, get_weight_file, get_ymir_process -from nptyping import NDArray, Shape +from mmdet.utils.util_ymir import get_weight_file from tqdm import tqdm from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw - -DETECTION_RESULT = NDArray[Shape['*,5'], Any] +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process def parse_option(cfg_options: str) -> dict: @@ -35,7 +33,10 @@ def parse_option(cfg_options: str) -> dict: return args.cfg_options -def mmdet_result_to_ymir(results: List[DETECTION_RESULT], class_names: List[str]) -> List[rw.Annotation]: +def mmdet_result_to_ymir(results: List[Any], class_names: List[str]) -> List[rw.Annotation]: + """ + results: List[NDArray[Shape['*,5'], Any]] + """ ann_list = [] for idx, result in enumerate(results): for line in result: diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining.py index 7eeaa1f..787290e 100644 --- a/det-mmdetection-tmi/ymir_mining.py +++ b/det-mmdetection-tmi/ymir_mining.py @@ -314,7 +314,7 @@ def mining(self): p = cls_scores_aug[aug_idx] q = cls_scores[origin_idx] m = (p + q) / 2. - js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m) + js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m]) if js < 0: js = 0 consistency_box = max_iou diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index 827dc8a..f10f210 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -12,13 +12,12 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from tqdm import tqdm -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - from mining.util import YmirDataset, load_image_file +from tqdm import tqdm from utils.general import scale_coords from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining.py index e8a6c59..7ac11bd 100644 --- a/det-yolov5-tmi/mining/ymir_mining.py +++ b/det-yolov5-tmi/mining/ymir_mining.py @@ -14,14 +14,13 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from tqdm import tqdm -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, update_consistency) +from tqdm import tqdm from utils.general import scale_coords from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) From 53bc0f50ca19b10d6976c13211d9170929998610 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 2 Sep 2022 11:30:39 +0800 Subject: [PATCH 112/204] fix mining entropy bug --- 
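The bug this patch fixes, in one line: `scipy.stats.entropy(pk, qk)` computes the KL divergence between two *distributions* and normalizes its inputs first, so calling it with two bare scalar confidences turns each input into the one-point distribution `[1.0]` and the result is always 0 — the Jensen-Shannon term contributed nothing to the mining score. The fix expands each confidence `s` into the two-outcome Bernoulli distribution `[s, 1 - s]`. A minimal sketch of the corrected term:

```python
# The corrected Jensen-Shannon divergence between two detection
# confidences, treated as Bernoulli distributions (matches the fixed
# expression in mining_cald.py, mining/util.py and ymir_mining.py).
from scipy.stats import entropy

def bernoulli_js(p: float, q: float) -> float:
    m = (p + q) / 2.0
    return 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m])

print(bernoulli_js(0.9, 0.5))  # ~0.10 nats: the two predictions disagree
print(bernoulli_js(0.9, 0.9))  # 0.0: identical confidences
# entropy(0.9, 0.5) == 0.0 -- the old scalar call: both inputs normalize to [1.0]
```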
det-mmdetection-tmi/training-template.yaml | 2 +- det-yolov5-tmi/mining/mining_cald.py | 2 +- det-yolov5-tmi/mining/util.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index d4c191f..7744172 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -3,7 +3,7 @@ export_format: 'ark:raw' samples_per_gpu: 16 workers_per_gpu: 16 max_epochs: 300 -config_file: 'configs/yolox/yolox_nano_8x8_300e_coco.py' +config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py' args_options: '' cfg_options: '' metric: 'bbox' diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py index 560326c..1588665 100644 --- a/det-yolov5-tmi/mining/mining_cald.py +++ b/det-yolov5-tmi/mining/mining_cald.py @@ -85,7 +85,7 @@ def mining(self) -> List: p = cls_scores_aug[aug_idx] q = cls_scores[origin_idx] m = (p + q) / 2. - js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m) + js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m]) if js < 0: js = 0 consistency_box = max_iou diff --git a/det-yolov5-tmi/mining/util.py b/det-yolov5-tmi/mining/util.py index 41c7c73..54ef5dd 100644 --- a/det-yolov5-tmi/mining/util.py +++ b/det-yolov5-tmi/mining/util.py @@ -107,7 +107,7 @@ def update_consistency(consistency, consistency_per_aug, beta, pred_bboxes_key, p = cls_scores_aug[aug_idx] q = cls_scores[origin_idx] m = (p + q) / 2. - js = 0.5 * entropy(p, m) + 0.5 * entropy(q, m) + js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m]) if js < 0: js = 0 consistency_box = max_iou From f920f48911ee64bbc0abe1eab1df11cbec2f84bb Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 2 Sep 2022 12:00:40 +0800 Subject: [PATCH 113/204] fix mining bug --- README.MD | 8 ++++---- README_zh-CN.MD | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.MD b/README.MD index 890bba7..c4046bf 100644 --- a/README.MD +++ b/README.MD @@ -68,7 +68,7 @@ ## overview -| docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weights | +| docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args/cfg options | framework | onnx | pretrained weights | | - | - | - | - | - | - | - | | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | @@ -76,11 +76,11 @@ | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | vidt | ? 
| ✔️ | ✔️ | pytorch | ❌ | online | -| nanodet | ❌ | ✔️ | ❌ | pytorch_lightning | ❌ | online | +| nanodet | ✔️ | ✔️ | ❌ | pytorch_lightning | ❌ | online | -- online pretrained weights may download through network +- `online` pretrained weights may download through network -- local pretrained weights have copied to docker images when building image +- `local` pretrained weights have copied to docker images when building image ### benchmark diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 53a25c9..a5baec1 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -68,7 +68,7 @@ ## 比较 -| docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args | framework | onnx | pretrained weight | +| docker image | [finetune](https://github.com/modelai/ymir-executor-fork/wiki/use-yolov5-to-finetune-or-training-model) | tensorboard | args/cfg options | framework | onnx | pretrained weight | | - | - | - | - | - | - | - | | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | @@ -76,11 +76,11 @@ | mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | vidt | ? | ✔️ | ✔️ | pytorch | ❌ | online | -| nanodet | ❌ | ✔️ | ❌ | pytorch_lightning | ❌ | online | +| nanodet | ✔️ | ✔️ | ❌ | pytorch_lightning | ❌ | online | -- online 预训练权重可能在训练时通过网络下载 +- `online` 预训练权重可能在训练时通过网络下载 -- local 预训练权重在构建镜像时复制到了镜像 +- `local` 预训练权重在构建镜像时复制到了镜像 ### benchmark From 70817f8c5aaec25d0240b998978a4f0520c0f57f Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 2 Sep 2022 15:48:20 +0800 Subject: [PATCH 114/204] zzz| --- README.MD | 1 + README_zh-CN.MD | 1 + 2 files changed, 2 insertions(+) diff --git a/README.MD b/README.MD index c4046bf..3c9eaab 100644 --- a/README.MD +++ b/README.MD @@ -105,6 +105,7 @@ gpu: single GeForce GTX 1080 Ti | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | | detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | +| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | --- diff --git a/README_zh-CN.MD b/README_zh-CN.MD index a5baec1..f9004a1 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -105,6 +105,7 @@ gpu: single GeForce GTX 1080 Ti | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | | detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | +| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h |s --- From ec58f3c12189fb2cb246365364132e743a140884 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 2 Sep 2022 15:48:59 +0800 Subject: [PATCH 115/204] zzz --- README_zh-CN.MD | 2 +- det-yolov5-tmi/mining/util.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/README_zh-CN.MD b/README_zh-CN.MD index f9004a1..47eb7b5 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -105,7 +105,7 @@ gpu: single GeForce GTX 1080 Ti | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | | detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | -| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h |s +| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | --- diff --git a/det-yolov5-tmi/mining/util.py b/det-yolov5-tmi/mining/util.py index 
54ef5dd..5c9b669 100644 --- a/det-yolov5-tmi/mining/util.py +++ b/det-yolov5-tmi/mining/util.py @@ -19,11 +19,10 @@ import cv2 import numpy as np import torch.utils.data as td -from scipy.stats import entropy -from torch.utils.data._utils.collate import default_collate - from mining.data_augment import cutout, horizontal_flip, resize, rotate from mining.mining_cald import get_ious +from scipy.stats import entropy +from torch.utils.data._utils.collate import default_collate from utils.augmentations import letterbox LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html From 0c8b49f65de31ab737ff1b95e4b2bc41eb37fd20 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 2 Sep 2022 15:54:03 +0800 Subject: [PATCH 116/204] add nanodet benchmark --- README.MD | 2 +- README_zh-CN.MD | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/README.MD b/README.MD index 3c9eaab..433dc2d 100644 --- a/README.MD +++ b/README.MD @@ -105,7 +105,7 @@ gpu: single GeForce GTX 1080 Ti | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | | detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | -| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | +| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | imagenet-pretrained | --- diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 47eb7b5..09b3fcc 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -84,9 +84,11 @@ ### benchmark -- training dataset: voc2012-train 5717 images -- validation dataset: voc2012-val 5823 images -- image size: 640 +- 训练集: voc2012-train 5717 images +- 测试集: voc2012-val 5823 images +- 图像大小: 640 (nanodet为416) + +**由于 coco 数据集包含 voc 数据集中的类, 因此这个对比并不公平, 仅供参考** gpu: single Tesla P4 @@ -105,7 +107,7 @@ gpu: single GeForce GTX 1080 Ti | yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | | mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | | detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | -| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | +| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | imagenet-pretrained | --- From 197fa095486d4817bbcb9664922f69dedc71fbf7 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 6 Sep 2022 09:38:58 +0800 Subject: [PATCH 117/204] fix mmdet ddp bug --- det-mmdetection-tmi/README.md | 4 ++++ det-mmdetection-tmi/mmdet/utils/util_ymir.py | 5 ++++- det-mmdetection-tmi/tools/train.py | 4 ++-- det-mmdetection-tmi/ymir_train.py | 2 +- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/det-mmdetection-tmi/README.md b/det-mmdetection-tmi/README.md index b2ed690..5c1934d 100644 --- a/det-mmdetection-tmi/README.md +++ b/det-mmdetection-tmi/README.md @@ -25,3 +25,7 @@ docker build -t ymir-executor/mmdet:cuda111-tmi --build-arg SERVER_MODE=dev --bu - add `training-template.yaml, infer-template.yaml, mining-template.yaml` for ymir pre-defined hyper-parameters. 
- add `docker/Dockerfile.cuda102, docker/Dockerfile.cuda111` to build docker image - remove `docker/Dockerfile` to avoid misuse + +--- + +- 2022/09/06: set `find_unused_parameters = True`, fix DDP bug diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 8498d9c..674117f 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -19,7 +19,7 @@ CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: +def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: """ useful for training process - modify dataset config @@ -83,6 +83,9 @@ def _modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: mmdet_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') # TODO Whether to evaluating the AP for each class # mmdet_cfg.evaluation.classwise = True + + # fix DDP error + mmdet_cfg.find_unused_parameters = True return mmdet_cfg diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index 2ecc642..3868d1e 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -16,7 +16,7 @@ from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger, setup_multi_processes -from mmdet.utils.util_ymir import _modify_mmdet_config +from mmdet.utils.util_ymir import modify_mmdet_config from ymir_exc.util import get_merged_config @@ -101,7 +101,7 @@ def main(): cfg = Config.fromfile(args.config) print(cfg) # modify mmdet config from file - cfg = _modify_mmdet_config(mmdet_cfg=cfg, ymir_cfg=ymir_cfg) + cfg = modify_mmdet_config(mmdet_cfg=cfg, ymir_cfg=ymir_cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 552654d..a84a805 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -12,7 +12,7 @@ def main(cfg: edict) -> int: # default ymir config - gpu_id = cfg.param.get("gpu_id", '0') + gpu_id: str = str(cfg.param.get("gpu_id", '0')) num_gpus = len(gpu_id.split(",")) if num_gpus == 0: raise Exception(f'gpu_id = {gpu_id} is not valid, eg: 0 or 2,4') From 5edd3909456477c7c0b2cd83e4f1f749029158ab Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 6 Sep 2022 14:56:25 +0800 Subject: [PATCH 118/204] fix link change --- README.MD | 4 ++-- README_zh-CN.MD | 4 ++-- live-code-executor/ymir_start.py | 8 +++++++- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/README.MD b/README.MD index 433dc2d..e82a242 100644 --- a/README.MD +++ b/README.MD @@ -142,13 +142,13 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . 
## how to custom ymir-executor

-- [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md)
+- [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-dataset-zh-CN.md)

- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk)

## how to import pretrained model weights

-- [import pretainted model weights](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md)
+- [import pretainted model weights](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md)

## reference

diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index 09b3fcc..f367f39 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -158,13 +158,13 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile

 ## 如何制作自己的ymir-executor

-- [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/docs/ymir-dataset-zh-CN.md)
+- [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-dataset-zh-CN.md)

 - [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir镜像开发辅助库

 ## 如何导入预训练模型

-- [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/docs/import-extra-models.md)
+- [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md)

 - 通过ymir网页端的 `模型管理/模型列表/导入模型` 同样可以导入模型

diff --git a/live-code-executor/ymir_start.py b/live-code-executor/ymir_start.py
index d2c5415..ee81336 100644
--- a/live-code-executor/ymir_start.py
+++ b/live-code-executor/ymir_start.py
@@ -50,7 +50,13 @@ def main():
         logger.info('no python package needs to install')

     # step 3. run /app/start.py
-    cmd = 'python3 start.py'
+    if osp.exists('/app/start.py'):
+        cmd = 'python3 start.py'
+    elif osp.exists('/app/ymir/start.py'):
+        cmd = 'python3 ymir/start.py'
+    else:
+        raise Exception('cannot found start.py')
+
     logger.info(f'run task: {cmd}')
     subprocess.run(cmd.split(), check=True, cwd='/app')

From 958d1214c6ca89dd9a286dd370b3d81318d8ac5b Mon Sep 17 00:00:00 2001
From: LuciferZap <92283801+LuciferZap@users.noreply.github.com>
Date: Thu, 8 Sep 2022 16:33:11 +0800
Subject: [PATCH 119/204] add yolov5 mining code

---
 .idea/encodings.xml | 4 +
 .idea/misc.xml | 7 +
 .idea/modules.xml | 8 +
 .idea/vcs.xml | 6 +
 .idea/workspace.xml | 222 ++++++++++++++++++++++
 .idea/ymir-executor-fork.iml | 12 ++
 det-yolov5-tmi/mining/ymir_mining_aldd.py | 167 ++++++++++++++++
 7 files changed, 426 insertions(+)
 create mode 100644 .idea/encodings.xml
 create mode 100644 .idea/misc.xml
 create mode 100644 .idea/modules.xml
 create mode 100644 .idea/vcs.xml
 create mode 100644 .idea/workspace.xml
 create mode 100644 .idea/ymir-executor-fork.iml
 create mode 100644 det-yolov5-tmi/mining/ymir_mining_aldd.py

diff --git a/.idea/encodings.xml b/.idea/encodings.xml
new file mode 100644
index 0000000..15a15b2
--- /dev/null
+++ b/.idea/encodings.xml
@@ -0,0 +1,4 @@
+[4 lines of IDE project XML; markup lost in extraction]
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..a09183e
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+[7 lines of IDE project XML; markup lost in extraction]
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..c5ce0fc
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+[8 lines of IDE project XML; markup lost in extraction]
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+[6 lines of IDE project XML; markup lost in extraction]
\ No newline at end of file
diff --git a/.idea/workspace.xml
b/.idea/workspace.xml
new file mode 100644
index 0000000..c116a57
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,222 @@
+[222 lines of PyCharm workspace XML; markup lost in extraction. The only recoverable fragments are recent-search entries: "nd.sum", "kernel", "torch.log", "torch.sum", "avg_pool_kernel"]
\ No newline at end of file
[The remainder of PATCH 119 (the new det-yolov5-tmi/mining/ymir_mining_aldd.py, 167 lines) and the header of the following commit, which deletes these .idea files again, were lost in extraction; only stray deletion hunks survive, among them the workspace.xml removal (a timestamp fragment 1662622258260 remains) and:]
diff --git a/.idea/ymir-executor-fork.iml b/.idea/ymir-executor-fork.iml
deleted file mode 100644
index 7c9d48f..0000000
--- a/.idea/ymir-executor-fork.iml
+++ /dev/null
@@ -1,12 +0,0 @@
-[12 lines of IDE module XML; markup lost in extraction]
\ No newline at end of file

From b6e25f9f84419bc9b7ff9043051ad21f6841cc4d Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 8 Sep 2022 16:48:44 +0800
Subject: [PATCH 121/204] update mmdet and readme

---
 README.MD | 19 +++--
 README_zh-CN.MD | 21 ++---
 det-mmdetection-tmi/mmdet/utils/util_ymir.py | 85 +++++++++++++++-----
 det-mmdetection-tmi/tools/train.py | 4 +-
 det-mmdetection-tmi/ymir_infer.py | 4 +-
 det-mmdetection-tmi/ymir_train.py | 8 +-
 6 files changed, 97 insertions(+), 44 deletions(-)

diff --git a/README.MD b/README.MD
index e82a242..d4ef769 100644
--- a/README.MD
+++ b/README.MD
@@ -99,13 +99,14 @@ gpu: single GeForce GTX 1080 Ti

-| docker image | batch size | epoch number | model | voc2012 val map50 | training time | note |
-| - | - | - | - | - | - | - |
-| yolov5 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained |
-| yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained |
-| mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained |
-| detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained |
-| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | imagenet-pretrained |
+| docker image | image size | batch size | epoch number | model | voc2012 val map50 | training time | note |
+| - | - | - | - | - | - | - | - |
+| yolov4 | 608 | 64/32 | 20000 steps | yolov4 | 72.73% | 6h | imagenet-pretrained |
+| yolov5 | 640 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained |
+| yolov7 | 640 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained |
+| mmdetection | 640 | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained |
+| detectron2 | 640 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained |
+| nanodet | 416 | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | imagenet-pretrained |

 ---
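The most interesting part of this patch sits further down: `util_ymir.py` renames `modify_mmdet_config` to `modify_mmcv_config` and replaces per-architecture head patching (`bbox_head` vs `roi_head` vs `mask_head`) with a recursive walk that rewrites every `num_classes` in the nested model config; it also keeps the earlier DDP workaround (`find_unused_parameters = True`). The same pattern rendered on plain dicts, as a standalone sketch (the real code walks mmcv `Config` objects):

```python
# Standalone rendering of the recursive config rewrite introduced in
# modify_mmcv_config below; plain dicts/lists keep the sketch runnable.
from typing import Any

def recursive_modify(cfg: dict, key_to_set: str, value: Any) -> None:
    for key, val in cfg.items():
        if key == key_to_set:
            cfg[key] = value
        elif isinstance(val, dict):
            recursive_modify(val, key_to_set, value)
        elif isinstance(val, (list, tuple)):
            for item in val:
                if isinstance(item, dict):
                    recursive_modify(item, key_to_set, value)

model = {'roi_head': {'bbox_head': [{'type': 'Shared2FCBBoxHead', 'num_classes': 80}]}}
recursive_modify(model, 'num_classes', 3)  # one call covers every head style
assert model['roi_head']['bbox_head'][0]['num_classes'] == 3
```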
## how to import pretrained model weights -- [import pretainted model weights](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md) +- [import and finetune model](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model) + +- ~~[import pretainted model weights](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md)~~ ## reference diff --git a/README_zh-CN.MD b/README_zh-CN.MD index f367f39..6cbcd8c 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -86,7 +86,7 @@ - 训练集: voc2012-train 5717 images - 测试集: voc2012-val 5823 images -- 图像大小: 640 (nanodet为416) +- 图像大小: 640 (nanodet为416, yolov4为608) **由于 coco 数据集包含 voc 数据集中的类, 因此这个对比并不公平, 仅供参考** @@ -101,13 +101,14 @@ gpu: single Tesla P4 gpu: single GeForce GTX 1080 Ti -| docker image | batch size | epoch number | model | voc2012 val map50 | training time | note | -| - | - | - | - | - | - | - | -| yolov5 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained | -| yolov7 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | -| mmdetection | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | -| detectron2 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | -| nanodet | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | imagenet-pretrained | +| docker image | image size | batch size | epoch number | model | voc2012 val map50 | training time | note | +| - | - | - | - | - | - | - | - | +| yolov4 | 608 | 64/32 | 20000 steps | yolov4 | 72.73% | 6h | imagenet-pretrained | +| yolov5 | 640 | 16 | 100 | yolov5s | 70.35% | 2h | coco-pretrained | +| yolov7 | 640 | 16 | 100 | yolov7-tiny | 70.4% | 5h | coco-pretrained | +| mmdetection | 640 | 16 | 100 | yolox_tiny | 66.2% | 5h | coco-pretrained | +| detectron2 | 640 | 2 | 20000 steps | retinanet_R_50_FPN_1x | 53.54% | 2h | imagenet-pretrained | +| nanodet | 416 | 16 | 100 | nanodet-plus-m_416 | 58.63% | 5h | imagenet-pretrained | --- @@ -164,7 +165,9 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 如何导入预训练模型 -- [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md) +- [如何导入并精调外部模型](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model) + +- ~~[如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md)~~ - 通过ymir网页端的 `模型管理/模型列表/导入模型` 同样可以导入模型 diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 674117f..12910ea 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -5,7 +5,7 @@ import logging import os import os.path as osp -from typing import Any, List, Optional +from typing import Any, Iterable, List, Optional import mmcv import yaml @@ -19,13 +19,24 @@ CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] -def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: +def modify_mmcv_config(mmcv_cfg: Config, ymir_cfg: edict) -> None: """ useful for training process - modify dataset config - modify model output channel - modify epochs, checkpoint, tensorboard config """ + def recursive_modify(mmcv_cfg: Config, attribute_key: str, attribute_value: Any): + for key in mmcv_cfg: + if key == attribute_key: + mmcv_cfg[key] = attribute_value + elif isinstance(mmcv_cfg[key], Config): + recursive_modify(mmcv_cfg[key], attribute_key, attribute_value) + elif isinstance(mmcv_cfg[key], Iterable): + for cfg in mmcv_cfg[key]: + if isinstance(cfg, Config): + recursive_modify(cfg, 
attribute_key, attribute_value) + # modify dataset config ymir_ann_files = dict(train=ymir_cfg.ymir.input.training_index_file, val=ymir_cfg.ymir.input.val_index_file, @@ -35,8 +46,11 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: # so set smaller samples_per_gpu for validation samples_per_gpu = ymir_cfg.param.samples_per_gpu workers_per_gpu = ymir_cfg.param.workers_per_gpu - mmdet_cfg.data.samples_per_gpu = samples_per_gpu - mmdet_cfg.data.workers_per_gpu = workers_per_gpu + mmcv_cfg.data.samples_per_gpu = samples_per_gpu + mmcv_cfg.data.workers_per_gpu = workers_per_gpu + + num_classes = len(ymir_cfg.param.class_names) + recursive_modify(mmcv_cfg.model, 'num_classes', num_classes) for split in ['train', 'val', 'test']: ymir_dataset_cfg = dict(type='YmirDataset', @@ -47,7 +61,7 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: data_root=ymir_cfg.ymir.input.root_dir, filter_empty_gt=False) # modify dataset config for `split` - mmdet_dataset_cfg = mmdet_cfg.data.get(split, None) + mmdet_dataset_cfg = mmcv_cfg.data.get(split, None) if mmdet_dataset_cfg is None: continue @@ -63,33 +77,65 @@ def modify_mmdet_config(mmdet_cfg: Config, ymir_cfg: edict) -> Config: else: raise Exception(f'unsupported source dataset type {src_dataset_type}') - # modify model output channel - mmdet_model_cfg = mmdet_cfg.model.bbox_head - mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) + # # modify model output channel + # if mmcv_cfg.model.get('bbox_head'): # yolox, yolo, yolof, retinanet, ssd + # mmdet_model_cfg = mmcv_cfg.model.bbox_head + # elif mmcv_cfg.model.get('roi_head'): # Faster-RCNN, fast-rcnn + # mmdet_model_cfg = mmcv_cfg.model.roi_head.bbox_head + # elif mmcv_cfg.model.get('mask_head'): # SOLO + # mmdet_model_cfg = mmcv_cfg.model.mask_head + # else: + # raise Exception('unknown model structure') + + # if mmdet_model_cfg.get('num_classes'): + # mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) + # else: + # raise Exception('unknown model structure, no attr num_classes found') # modify epochs, checkpoint, tensorboard config if ymir_cfg.param.get('max_epochs', None): - mmdet_cfg.runner.max_epochs = ymir_cfg.param.max_epochs - mmdet_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir + mmcv_cfg.runner.max_epochs = ymir_cfg.param.max_epochs + mmcv_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir=ymir_cfg.ymir.output.tensorboard_dir) - if len(mmdet_cfg.log_config['hooks']) <= 1: - mmdet_cfg.log_config['hooks'].append(tensorboard_logger) + if len(mmcv_cfg.log_config['hooks']) <= 1: + mmcv_cfg.log_config['hooks'].append(tensorboard_logger) else: - mmdet_cfg.log_config['hooks'][1].update(tensorboard_logger) + mmcv_cfg.log_config['hooks'][1].update(tensorboard_logger) # modify evaluation and interval - interval = max(1, mmdet_cfg.runner.max_epochs // 30) - mmdet_cfg.evaluation.interval = interval - mmdet_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') + interval = max(1, mmcv_cfg.runner.max_epochs // 10) + mmcv_cfg.evaluation.interval = interval + mmcv_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') + mmcv_cfg.checkpoint_config.interval = mmcv_cfg.evaluation.interval # TODO Whether to evaluating the AP for each class # mmdet_cfg.evaluation.classwise = True # fix DDP error - mmdet_cfg.find_unused_parameters = True - return mmdet_cfg + mmcv_cfg.find_unused_parameters = True + + # set work dir + mmcv_cfg.work_dir = 
ymir_cfg.ymir.output.models_dir + + args_options = ymir_cfg.param.get("args_options", '') + cfg_options = ymir_cfg.param.get("cfg_options", '') + + # auto load offered weight file if not set by user! + if (args_options.find('--resume-from') == -1 and + args_options.find('--load-from') == -1 and + cfg_options.find('load_from') == -1 and + cfg_options.find('resume_from') == -1): # noqa: E129 + + weight_file = get_best_weight_file(ymir_cfg) + if weight_file: + if cfg_options: + cfg_options += f' load_from={weight_file}' + else: + cfg_options = f'load_from={weight_file}' + else: + logging.warning('no weight file used for training!') -def get_weight_file(cfg: edict) -> str: +def get_best_weight_file(cfg: edict) -> str: """ return the weight file path by priority find weight file in cfg.param.pretrained_model_params or cfg.param.model_params_path @@ -118,6 +164,7 @@ def get_weight_file(cfg: edict) -> str: if cfg.ymir.run_training: weight_files = [f for f in glob.glob('/weights/**/*', recursive=True) if f.endswith(('.pth', '.pt'))] + # load pretrained model weight for yolox only model_name_splits = osp.basename(cfg.param.config_file).split('_') if len(weight_files) > 0 and model_name_splits[0] == 'yolox': yolox_weight_files = [ diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index 3868d1e..df4f184 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -16,7 +16,7 @@ from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger, setup_multi_processes -from mmdet.utils.util_ymir import modify_mmdet_config +from mmdet.utils.util_ymir import modify_mmcv_config from ymir_exc.util import get_merged_config @@ -101,7 +101,7 @@ def main(): cfg = Config.fromfile(args.config) print(cfg) # modify mmdet config from file - cfg = modify_mmdet_config(mmdet_cfg=cfg, ymir_cfg=ymir_cfg) + modify_mmcv_config(mmcv_cfg=cfg, ymir_cfg=ymir_cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index a7f22bd..939e5bf 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -9,7 +9,7 @@ from easydict import EasyDict as edict from mmcv import DictAction from mmdet.apis import inference_detector, init_detector -from mmdet.utils.util_ymir import get_weight_file +from mmdet.utils.util_ymir import get_best_weight_file from tqdm import tqdm from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor @@ -87,7 +87,7 @@ def __init__(self, cfg: edict): # Specify the path to model config and checkpoint file config_file = get_config_file(cfg) - checkpoint_file = get_weight_file(cfg) + checkpoint_file = get_best_weight_file(cfg) options = cfg.param.get('cfg_options', None) cfg_options = parse_option(options) if options else None diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index a84a805..06ed4dd 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -5,9 +5,9 @@ import sys from easydict import EasyDict as edict -from mmdet.utils.util_ymir import get_weight_file, write_ymir_training_result +from mmdet.utils.util_ymir import get_best_weight_file, write_ymir_training_result from ymir_exc import monitor -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process +from ymir_exc.util import YmirStage, find_free_port, get_merged_config, get_ymir_process def 
main(cfg: edict) -> int: @@ -32,7 +32,7 @@ def main(cfg: edict) -> int: (cfg_options is None or (cfg_options.find('load_from') == -1 and cfg_options.find('resume_from') == -1)): - weight_file = get_weight_file(cfg) + weight_file = get_best_weight_file(cfg) if weight_file: if cfg_options: cfg_options += f' load_from={weight_file}' @@ -55,7 +55,7 @@ def main(cfg: edict) -> int: f"--work-dir {work_dir} --gpu-id {gpu_id}" else: os.environ.setdefault('CUDA_VISIBLE_DEVICES', gpu_id) - port = cfg.param.get('port') + port = find_free_port() os.environ.setdefault('PORT', str(port)) cmd = f"bash ./tools/dist_train.sh {config_file} {num_gpus} " + \ f"--work-dir {work_dir}" From 30194ebde5b02b6b029ffe1c4871805442cc92da Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 8 Sep 2022 18:53:35 +0800 Subject: [PATCH 122/204] add new mining algorithm for yolov5 --- det-yolov5-tmi/mining-template.yaml | 2 + det-yolov5-tmi/mining/ymir_mining_aldd.py | 164 +++++++++++------- .../{ymir_mining.py => ymir_mining_cald.py} | 6 +- det-yolov5-tmi/start.py | 25 ++- det-yolov5-tmi/training-template.yaml | 1 - det-yolov5-tmi/utils/ymir_yolov5.py | 11 +- 6 files changed, 122 insertions(+), 87 deletions(-) rename det-yolov5-tmi/mining/{ymir_mining.py => ymir_mining_cald.py} (97%) diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml index 1ae6d29..0979de2 100644 --- a/det-yolov5-tmi/mining-template.yaml +++ b/det-yolov5-tmi/mining-template.yaml @@ -8,6 +8,8 @@ # class_names: [] img_size: 640 +mining_algorithm: aldd +class_distribution_scores: '' conf_thres: 0.25 iou_thres: 0.45 batch_size_per_gpu: 16 diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 7891630..c9bc4f2 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -7,15 +7,16 @@ """ import os import sys +import warnings from functools import partial +from typing import Any, List import numpy as np import torch import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, - update_consistency) +from mining.util import YmirDataset, load_image_file from tqdm import tqdm from utils.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw @@ -26,69 +27,95 @@ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def calc_unc_val(heatmap): - avg_pool_kernel = 9 - max_pool_kernel = 30 - pad = (avg_pool_kernel - 1) // 2 - - avg_pooling_layer = torch.nn.AvgPool2d(kernel=(avg_pool_kernel, avg_pool_kernel), stride = (1, 1), count_include_pad=False, pad=(pad, pad)) - max_pooling_layer = torch.nn.MaxPool2d(kernel=(max_pool_kernel, max_pool_kernel), stride = (30, 30), pad=(2, 2)) - - # mean of entropy - prob_pixel = heatmap - prob_pixel_m1 = 1 - heatmap - ent = -(prob_pixel * torch.log(prob_pixel + 1e-12) + prob_pixel_m1 * torch.log(prob_pixel_m1 + 1e-12)) # N, C, H, W - ent = torch.sum(ent, axis=1, keepdims=True) # N, 1, H, W - mean_of_entropy = avg_pooling_layer(ent) # N, 1, H, W - - # entropy of mean - prob_local = avg_pooling_layer(heatmap) # N, C, H, W - prob_local_m1 = 1 - prob_local - entropy_of_mean = -(prob_local * torch.log(prob_local + 1e-12) + prob_local_m1 * torch.log(prob_local_m1 + 1e-12)) # N, C, H, W - entropy_of_mean = torch.sum(entropy_of_mean, axis=1, keepdims=True) # N, 1, H, W - - uncertainty = entropy_of_mean - mean_of_entropy - unc = max_pooling_layer(uncertainty) - - 
# aggregating - scores = torch.mean(unc, axis=(1, 2, 3)) - return scores - - -def compute_aldd_score(net_output, num_of_class, net_input_shape): - """ - args: - imgs: list[np.array(H, W, C)] - returns: - scores: list of float - """ - - CLASS_DISTRIBUTION_SCORE = np.array([1.0] * num_of_class) - total_scores = [] - - for each_class_index in range(num_of_class): - feature_map_concate = [] - for each_output_feature_map in net_output: - net_output_conf = each_output_feature_map[:, :, :, :, 4] - net_output_cls_mult_conf = net_output_conf * each_output_feature_map[:, :, :, :, 5 + each_class_index] - feature_map_reshape = torch.nn.functional.interpolate(net_output_cls_mult_conf, (net_input_shape, net_input_shape), mode='bilinear') - feature_map_concate.append(feature_map_reshape) - - feature_map_concate = torch.cat(feature_map_concate, 1) - scores = calc_unc_val(feature_map_concate) - scores = scores.cpu().detach().numpy() - total_scores.append(scores) - - total_scores = np.array(total_scores) - total_scores = total_scores * CLASS_DISTRIBUTION_SCORE - total_scores = np.sum(total_scores, axis=0) - - return total_scores +class ALDD(object): + def __init__(self, ymir_cfg: edict): + avg_pool_kernel = 9 + max_pool_kernel = 30 + pad = (avg_pool_kernel - 1) // 2 + + self.avg_pooling_layer = torch.nn.AvgPool2d(kernel_size=(avg_pool_kernel, avg_pool_kernel), + stride=(1, 1), + count_include_pad=False, + padding=(pad, pad)) + self.max_pooling_layer = torch.nn.MaxPool2d(kernel_size=(max_pool_kernel, max_pool_kernel), + stride=(30, 30), + padding=(2, 2)) + + self.num_classes = len(ymir_cfg.param.class_names) + if ymir_cfg.param.get('class_distribution_scores', ''): + scores = [float(x.strip()) for x in ymir_cfg.param.class_distribution_scores.split(',')] + if len(scores) < self.num_classes: + warnings.warn('extend 1.0 to class_distribution_scores') + scores.extend([1.0] * (self.num_classes - len(scores))) + self.class_distribution_scores = np.array(scores, dtype=np.float32) + else: + self.class_distribution_scores = np.array([1.0] * self.num_classes, dtype=np.float32) + + def calc_unc_val(self, heatmap: torch.Tensor) -> torch.Tensor: + # mean of entropy + prob_pixel = heatmap + prob_pixel_m1 = 1 - heatmap + ent = -(prob_pixel * torch.log(prob_pixel + 1e-12) + prob_pixel_m1 * torch.log(prob_pixel_m1 + 1e-12) + ) # N, C, H, W + ent = torch.sum(ent, dim=1, keepdim=True) # N, 1, H, W + mean_of_entropy = self.avg_pooling_layer(ent) # N, 1, H, W + + # entropy of mean + prob_local = self.avg_pooling_layer(heatmap) # N, C, H, W + prob_local_m1 = 1 - prob_local + entropy_of_mean = -( + prob_local * torch.log(prob_local + 1e-12) + prob_local_m1 * torch.log(prob_local_m1 + 1e-12)) # N, C, H, W + entropy_of_mean = torch.sum(entropy_of_mean, dim=1, keepdim=True) # N, 1, H, W + + uncertainty = entropy_of_mean - mean_of_entropy + unc = self.max_pooling_layer(uncertainty) + + # aggregating + scores = torch.mean(unc, dim=(1, 2, 3)) + return scores + + def compute_aldd_score(self, net_output: torch.Tensor, net_input_shape: Any): + """ + args: + imgs: list[np.array(H, W, C)] + returns: + scores: list of float + """ + if not isinstance(net_input_shape, (list, tuple)): + net_input_shape = (net_input_shape, net_input_shape) + + # CLASS_DISTRIBUTION_SCORE = np.array([1.0] * num_of_class) + scores_list = [] + + for each_class_index in range(self.num_classes): + feature_map_list: List[torch.Tensor] = [] + + for each_output_feature_map in net_output: + each_output_feature_map.sigmoid_() + net_output_conf = each_output_feature_map[:, 
:, :, :, 4] + net_output_cls_mult_conf = net_output_conf * each_output_feature_map[:, :, :, :, 5 + each_class_index] + feature_map_reshape = torch.nn.functional.interpolate(net_output_cls_mult_conf, + net_input_shape, + mode='bilinear', + align_corners=False) + feature_map_list.append(feature_map_reshape) + + feature_map_concate = torch.cat(feature_map_list, 1) + scores = self.calc_unc_val(feature_map_concate) + scores = scores.cpu().detach().numpy() + scores_list.append(scores) + + total_scores = np.array(scores_list) + total_scores = total_scores * self.class_distribution_scores + total_scores = np.sum(total_scores, axis=0) + + return total_scores def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. - gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 device = torch.device('cuda', gpu) ymir_yolov5.to(device) @@ -105,7 +132,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): images = [line.strip() for line in f.readlines()] # origin dataset - images_rank = images[RANK::WORLD_SIZE] + if RANK != -1: + images_rank = images[RANK::WORLD_SIZE] + else: + images_rank = images origin_dataset = YmirDataset(images_rank, load_fn=load_fn) origin_dataset_loader = td.DataLoader(origin_dataset, batch_size=batch_size_per_gpu, @@ -118,10 +148,11 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): mining_results = dict() dataset_size = len(images_rank) pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + miner = ALDD(ymir_cfg) for idx, batch in enumerate(pbar): with torch.no_grad(): featuremap_output = ymir_yolov5.model.model(batch['image'].float().to(device))[1] - unc_scores = compute_aldd_score(featuremap_output, len(ymir_cfg.param.class_names), ymir_yolov5.img_size) + unc_scores = miner.compute_aldd_score(featuremap_output, ymir_yolov5.img_size) for each_imgname, each_score in zip(batch["image_file"], unc_scores): mining_results[each_imgname] = each_score @@ -134,18 +165,21 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): def main() -> int: ymir_cfg = get_merged_config() + # note select_device(gpu_id) will set os.environ['CUDA_VISIBLE_DEVICES'] to gpu_id ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 torch.cuda.set_device(gpu) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - dist.barrier() + if WORLD_SIZE > 1: + dist.barrier() if RANK in [0, -1]: results = [] diff --git a/det-yolov5-tmi/mining/ymir_mining.py b/det-yolov5-tmi/mining/ymir_mining_cald.py similarity index 97% rename from det-yolov5-tmi/mining/ymir_mining.py rename to det-yolov5-tmi/mining/ymir_mining_cald.py index 7ac11bd..d84e2f7 100644 --- a/det-yolov5-tmi/mining/ymir_mining.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -29,7 +29,8 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. 
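In the mining scripts below, indexing the physical GPU id (`gpu_id.split(',')[LOCAL_RANK]`) is replaced by `LOCAL_RANK` itself. The reason: `select_device()` has already narrowed `CUDA_VISIBLE_DEVICES` to the user-supplied `gpu_id`, so torch renumbers the visible devices from 0 and the local rank is the device index. A minimal sketch of that mapping, with illustrative values:

```python
# Sketch only: CUDA_VISIBLE_DEVICES is normally exported by select_device();
# here it is set by hand to make the renumbering visible.
import os
import torch

os.environ['CUDA_VISIBLE_DEVICES'] = '1,3,5,7'  # the user-supplied gpu_id
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))   # -1 outside torch.distributed.launch

gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0      # rank 2 -> cuda:2 -> physical GPU 5
device = torch.device('cuda', gpu)
```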
- gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 device = torch.device('cuda', gpu) ymir_yolov5.to(device) @@ -158,7 +159,8 @@ def main() -> int: if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 torch.cuda.set_device(gpu) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 9e2dfa1..0cc29df 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -5,15 +5,14 @@ import cv2 from easydict import EasyDict as edict +from models.experimental import attempt_download +from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw from ymir_exc.util import (YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process, write_ymir_training_result) -from models.experimental import attempt_download -from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file - def start() -> int: cfg = get_merged_config() @@ -119,18 +118,21 @@ def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, - p=1.0, - task_idx=task_idx, - task_num=task_num)) + percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 - if gpu_count <= 1: + mining_algorithm = cfg.param.get('mining_algorithm', 'aldd') + support_mining_algorithms = ['aldd', 'cald'] + if mining_algorithm not in support_mining_algorithms: + raise Exception(f'unknown mining algorithm {mining_algorithm}, not in {support_mining_algorithms}') + + if gpu_count <= 1 and mining_algorithm in ['cald']: command = 'python3 mining/mining_cald.py' else: port = find_free_port() - command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_mining.py' # noqa + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_mining_{mining_algorithm}.py' # noqa + logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) monitor.write_monitor_logger( @@ -143,10 +145,7 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, - p=1.0, - task_idx=task_idx, - task_num=task_num)) + percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index ac9a91f..f3f7a20 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -17,4 +17,3 @@ opset: 11 args_options: '--exist-ok' save_period: 10 sync_bn: False # work for 
multi-gpu only -port: 29500 # work for multi-gpu only diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py index 4093100..22ec372 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -11,16 +11,15 @@ import torch import yaml from easydict import EasyDict as edict +from models.common import DetectMultiBackend from nptyping import NDArray, Shape, UInt8 from packaging.version import Version -from ymir_exc import monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process - -from models.common import DetectMultiBackend from utils.augmentations import letterbox from utils.general import check_img_size, non_max_suppression, scale_coords from utils.torch_utils import select_device +from ymir_exc import monitor +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process BBOX = NDArray[Shape['*,4'], Any] CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] @@ -65,7 +64,7 @@ def __init__(self, cfg: edict, task='infer'): self.task_num = 1 self.gpu_id: str = str(cfg.param.get('gpu_id', '0')) - device = select_device(self.gpu_id) + device = select_device(self.gpu_id) # will set CUDA_VISIBLE_DEVICES=self.gpu_id self.gpu_count: int = len(self.gpu_id.split(',')) if self.gpu_id else 0 self.batch_size_per_gpu: int = int(cfg.param.get('batch_size_per_gpu', 4)) self.num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) From 953fc9a91992a3ec48d841da041e8ae4be3aad59 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 8 Sep 2022 19:00:30 +0800 Subject: [PATCH 123/204] add new mining algorithm aldd for yolov5 --- README.MD | 1 + README_zh-CN.MD | 1 + det-yolov5-tmi/README.md | 6 +++++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/README.MD index d4ef769..1d834f2 100644 --- a/README.MD +++ b/README.MD @@ -156,6 +156,7 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . ## reference - [mining algorithm: CALD](https://github.com/we1pingyu/CALD/) +- [mining algorithm: ALDD](https://gitlab.com/haghdam/deep_active_learning) - [yolov4](https://github.com/AlexeyAB/darknet) - [yolov5](https://github.com/ultralytics/yolov5) - [mmdetection](https://github.com/open-mmlab/mmdetection) diff --git a/README_zh-CN.MD index 6cbcd8c..f22015a 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -174,6 +174,7 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 参考 - [挖掘算法CALD](https://github.com/we1pingyu/CALD/) +- [挖掘算法ALDD](https://gitlab.com/haghdam/deep_active_learning) - [yolov4](https://github.com/AlexeyAB/darknet) - [yolov5](https://github.com/ultralytics/yolov5) - [mmdetection](https://github.com/open-mmlab/mmdetection) diff --git a/det-yolov5-tmi/README.md index 520d78c..102c198 100644 --- a/det-yolov5-tmi/README.md +++ b/det-yolov5-tmi/README.md @@ -7,7 +7,7 @@ docker build -t ymir/ymir-executor:ymir1.1.0-cuda102-yolov5-tmi --build-arg SERV docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda111.dockerfile . ``` -## change log +## main change log - add `start.py` and `utils/ymir_yolov5.py` for train/infer/mining @@ -34,3 +34,7 @@ docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERV - modify `requirements.txt` - other modifications to support onnx export; not important.
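The ALDD miner added by this series scores an image by the spatial inconsistency of the detector's per-class heatmaps: the entropy of the locally averaged probability minus the local average of the per-pixel entropy, pooled into one number per image. A self-contained sketch of that measure (shapes, kernel size, and padding are illustrative, not the exact executor code):

```python
import torch
import torch.nn.functional as F

def binary_entropy(p: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    # element-wise entropy of a Bernoulli variable with probability p
    return -(p * torch.log(p + eps) + (1 - p) * torch.log(1 - p + eps))

heatmap = torch.rand(2, 9, 64, 64)  # N, C, H, W: sigmoid class-confidence maps
k, pad = 9, 4                       # local window with 'same' padding

mean_of_entropy = F.avg_pool2d(binary_entropy(heatmap), k, stride=1, padding=pad)
entropy_of_mean = binary_entropy(F.avg_pool2d(heatmap, k, stride=1, padding=pad))

# large where a window is uncertain on average but its pixels are individually confident
uncertainty = (entropy_of_mean - mean_of_entropy).sum(dim=1, keepdim=True)  # N, 1, H, W
scores = uncertainty.mean(dim=(1, 2, 3))  # one mining score per image
```

Images with the highest scores are the ones the active-learning loop sends out for labeling.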
+ +## new features + +- 2022/09/08: add aldd active learning algorithm for mining task. [Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning) From 2e03292d99dadd992da760dcd4ed48bd4a79e112 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 8 Sep 2022 19:03:50 +0800 Subject: [PATCH 124/204] support long scores --- det-yolov5-tmi/mining-template.yaml | 2 +- det-yolov5-tmi/mining/ymir_mining_aldd.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml index 0979de2..9011fe6 100644 --- a/det-yolov5-tmi/mining-template.yaml +++ b/det-yolov5-tmi/mining-template.yaml @@ -9,7 +9,7 @@ img_size: 640 mining_algorithm: aldd -class_distribution_scores: '' +class_distribution_scores: '' # 1.0,1.0,0.1,0.2 conf_thres: 0.25 iou_thres: 0.45 batch_size_per_gpu: 16 diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index c9bc4f2..52a85b8 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -47,7 +47,7 @@ def __init__(self, ymir_cfg: edict): if len(scores) < self.num_classes: warnings.warn('extend 1.0 to class_distribution_scores') scores.extend([1.0] * (self.num_classes - len(scores))) - self.class_distribution_scores = np.array(scores, dtype=np.float32) + self.class_distribution_scores = np.array(scores[0:self.num_classes], dtype=np.float32) else: self.class_distribution_scores = np.array([1.0] * self.num_classes, dtype=np.float32) From 9abe0c99d4513f736e5c53f82269481470527c6e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 14 Sep 2022 10:42:59 +0800 Subject: [PATCH 125/204] support change num_workers_per_gpu --- det-mmdetection-tmi/training-template.yaml | 2 +- det-yolov5-tmi/start.py | 4 +++- det-yolov5-tmi/training-template.yaml | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index 7744172..7a265ac 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -1,7 +1,7 @@ shm_size: '32G' export_format: 'ark:raw' samples_per_gpu: 16 -workers_per_gpu: 16 +workers_per_gpu: 8 max_epochs: 300 config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py' args_options: '' diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 0cc29df..bd4b537 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -56,6 +56,7 @@ def _run_training(cfg: edict) -> None: # 2. 
training model epochs: int = int(cfg.param.epochs) batch_size_per_gpu: int = int(cfg.param.batch_size_per_gpu) + num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 8)) model: str = cfg.param.model img_size: int = int(cfg.param.img_size) save_period: int = max(1, min(epochs // 10, int(cfg.param.save_period))) @@ -87,7 +88,8 @@ def _run_training(cfg: edict) -> None: str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', project, '--cfg', f'models/{model}.yaml', '--name', name, '--weights', weights, '--img-size', str(img_size), '--save-period', - str(save_period), '--device', device + str(save_period), '--device', device, + '--workers', str(num_workers_per_gpu) ]) if gpu_count > 1 and sync_bn: diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index f3f7a20..4bd27b5 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -11,6 +11,7 @@ shm_size: '32G' export_format: 'ark:raw' model: 'yolov5s' batch_size_per_gpu: 16 +num_workers_per_gpu: 8 epochs: 300 img_size: 640 opset: 11 From a3079e5cf697828094b66cdd3cadf0971e571597 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 15 Sep 2022 17:28:13 +0800 Subject: [PATCH 126/204] assign before used --- det-yolov5-tmi/README.md | 1 + det-yolov5-tmi/mining/ymir_infer.py | 2 +- det-yolov5-tmi/mining/ymir_mining_aldd.py | 14 +++++++++++--- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/README.md b/det-yolov5-tmi/README.md index 102c198..bc1d11a 100644 --- a/det-yolov5-tmi/README.md +++ b/det-yolov5-tmi/README.md @@ -38,3 +38,4 @@ docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERV ## new features - 2022/09/08: add aldd active learning algorithm for mining task. 
[Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning) +- 2022/09/14: support change hyper-parameter `num_workers_per_gpu` diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index f10f210..258af64 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -67,9 +67,9 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): preprocess_image_shape = batch['image'].shape[2:] for idx, det in enumerate(pred): # per image result_per_image = [] + image_file = batch['image_file'][idx] if len(det): origin_image_shape = (batch['origin_shape'][0][idx], batch['origin_shape'][1][idx]) - image_file = batch['image_file'][idx] # Rescale boxes from img_size to img size det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() result_per_image.append(det) diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 52a85b8..5a2dd72 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -74,7 +74,7 @@ def calc_unc_val(self, heatmap: torch.Tensor) -> torch.Tensor: scores = torch.mean(unc, dim=(1, 2, 3)) return scores - def compute_aldd_score(self, net_output: torch.Tensor, net_input_shape: Any): + def compute_aldd_score(self, net_output: List[torch.Tensor], net_input_shape: Any): """ args: imgs: list[np.array(H, W, C)] @@ -87,27 +87,35 @@ def compute_aldd_score(self, net_output: torch.Tensor, net_input_shape: Any): # CLASS_DISTRIBUTION_SCORE = np.array([1.0] * num_of_class) scores_list = [] + for feature_map in net_output: + feature_map.sigmoid_() + for each_class_index in range(self.num_classes): feature_map_list: List[torch.Tensor] = [] + # each_output_feature_map: [bs, 3, h, w, 5 + num_classes] for each_output_feature_map in net_output: - each_output_feature_map.sigmoid_() net_output_conf = each_output_feature_map[:, :, :, :, 4] net_output_cls_mult_conf = net_output_conf * each_output_feature_map[:, :, :, :, 5 + each_class_index] + # feature_map_reshape: [bs, 3, h, w] feature_map_reshape = torch.nn.functional.interpolate(net_output_cls_mult_conf, net_input_shape, mode='bilinear', align_corners=False) feature_map_list.append(feature_map_reshape) + # len(net_output) = 3 + # feature_map_concate: [bs, 9, h, w] feature_map_concate = torch.cat(feature_map_list, 1) + # scores: [bs, 1] scores = self.calc_unc_val(feature_map_concate) scores = scores.cpu().detach().numpy() scores_list.append(scores) + # total_scores: [bs, num_classes] total_scores = np.array(scores_list) total_scores = total_scores * self.class_distribution_scores - total_scores = np.sum(total_scores, axis=0) + total_scores = np.sum(total_scores, axis=1) return total_scores From f11acb40a7a70e037205ea396bb31689531df810 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 16 Sep 2022 10:05:38 +0800 Subject: [PATCH 127/204] update aldd mining algorithm --- det-yolov5-tmi/.dockerignore | 1 + det-yolov5-tmi/mining/ymir_mining_aldd.py | 60 +++++++++++------------ det-yolov5-tmi/utils/ymir_yolov5.py | 6 +++ 3 files changed, 37 insertions(+), 30 deletions(-) diff --git a/det-yolov5-tmi/.dockerignore b/det-yolov5-tmi/.dockerignore index bee6b98..9f34de6 100644 --- a/det-yolov5-tmi/.dockerignore +++ b/det-yolov5-tmi/.dockerignore @@ -14,6 +14,7 @@ data/samples/* # Neural Network weights ----------------------------------------------------------------------------------------------- #**/*.pt **/*.pth +**/*.pkl 
**/*.onnx **/*.engine **/*.mlmodel diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 5a2dd72..2ae2845 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -14,6 +14,7 @@ import numpy as np import torch import torch.distributed as dist +import torch.nn.functional as F import torch.utils.data as td from easydict import EasyDict as edict from mining.util import YmirDataset, load_image_file @@ -29,17 +30,9 @@ class ALDD(object): def __init__(self, ymir_cfg: edict): - avg_pool_kernel = 9 - max_pool_kernel = 30 - pad = (avg_pool_kernel - 1) // 2 - - self.avg_pooling_layer = torch.nn.AvgPool2d(kernel_size=(avg_pool_kernel, avg_pool_kernel), - stride=(1, 1), - count_include_pad=False, - padding=(pad, pad)) - self.max_pooling_layer = torch.nn.MaxPool2d(kernel_size=(max_pool_kernel, max_pool_kernel), - stride=(30, 30), - padding=(2, 2)) + self.avg_pool_size = 9 + self.max_pool_size = 32 + self.avg_pool_pad = (self.avg_pool_size - 1) // 2 self.num_classes = len(ymir_cfg.param.class_names) if ymir_cfg.param.get('class_distribution_scores', ''): @@ -53,25 +46,32 @@ def __init__(self, ymir_cfg: edict): def calc_unc_val(self, heatmap: torch.Tensor) -> torch.Tensor: # mean of entropy - prob_pixel = heatmap - prob_pixel_m1 = 1 - heatmap - ent = -(prob_pixel * torch.log(prob_pixel + 1e-12) + prob_pixel_m1 * torch.log(prob_pixel_m1 + 1e-12) - ) # N, C, H, W - ent = torch.sum(ent, dim=1, keepdim=True) # N, 1, H, W - mean_of_entropy = self.avg_pooling_layer(ent) # N, 1, H, W + ent = F.binary_cross_entropy(heatmap, heatmap, reduction='none') + avg_ent = F.avg_pool2d(ent, + kernel_size=self.avg_pool_size, + stride=1, + padding=self.avg_pool_pad, + count_include_pad=False) # N, 1, H, W + mean_of_entropy = torch.sum(avg_ent, dim=1, keepdim=True) # N, 1, H, W # entropy of mean - prob_local = self.avg_pooling_layer(heatmap) # N, C, H, W - prob_local_m1 = 1 - prob_local - entropy_of_mean = -( - prob_local * torch.log(prob_local + 1e-12) + prob_local_m1 * torch.log(prob_local_m1 + 1e-12)) # N, C, H, W - entropy_of_mean = torch.sum(entropy_of_mean, dim=1, keepdim=True) # N, 1, H, W + avg_heatmap = F.avg_pool2d(heatmap, + kernel_size=self.avg_pool_size, + stride=1, + padding=self.avg_pool_pad, + count_include_pad=False) # N, C, H, W + ent_avg = F.binary_cross_entropy(avg_heatmap, avg_heatmap, reduction='none') + entropy_of_mean = torch.sum(ent_avg, dim=1, keepdim=True) # N, 1, H, W uncertainty = entropy_of_mean - mean_of_entropy - unc = self.max_pooling_layer(uncertainty) + unc = F.max_pool2d(uncertainty, + kernel_size=self.max_pool_size, + stride=self.max_pool_size, + padding=0, + ceil_mode=False) # aggregating - scores = torch.mean(unc, dim=(1, 2, 3)) + scores = torch.mean(unc, dim=(1, 2, 3)) # (N,) return scores def compute_aldd_score(self, net_output: List[torch.Tensor], net_input_shape: Any): @@ -98,22 +98,22 @@ def compute_aldd_score(self, net_output: List[torch.Tensor], net_input_shape: An net_output_conf = each_output_feature_map[:, :, :, :, 4] net_output_cls_mult_conf = net_output_conf * each_output_feature_map[:, :, :, :, 5 + each_class_index] # feature_map_reshape: [bs, 3, h, w] - feature_map_reshape = torch.nn.functional.interpolate(net_output_cls_mult_conf, - net_input_shape, - mode='bilinear', - align_corners=False) + feature_map_reshape = F.interpolate(net_output_cls_mult_conf, + net_input_shape, + mode='bilinear', + align_corners=False) feature_map_list.append(feature_map_reshape) # len(net_output) 
= 3 # feature_map_concate: [bs, 9, h, w] feature_map_concate = torch.cat(feature_map_list, 1) - # scores: [bs, 1] + # scores: [bs, 1] for each class scores = self.calc_unc_val(feature_map_concate) scores = scores.cpu().detach().numpy() scores_list.append(scores) # total_scores: [bs, num_classes] - total_scores = np.array(scores_list) + total_scores = np.stack(scores_list, axis=1) total_scores = total_scores * self.class_distribution_scores total_scores = np.sum(total_scores, axis=1) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 22ec372..675110c 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -85,6 +85,12 @@ def __init__(self, cfg: edict, task='infer'): self.model.warmup(imgsz=(1, 3, *imgsz), half=False) # warmup self.img_size: List[int] = imgsz + def extract_feats(self, x): + """ + return the feature maps before sigmoid for mining + """ + return self.model.model(x)[1] + def forward(self, x, nms=False): pred = self.model(x) if not nms: From 4affb2908922078544ddc77ffed51b0261160fde Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 16 Sep 2022 15:31:37 +0800 Subject: [PATCH 128/204] fix code-review conversation --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 49 +++++++++----------- det-mmdetection-tmi/training-template.yaml | 1 + det-yolov5-tmi/mining/ymir_mining_aldd.py | 7 ++- det-yolov5-tmi/mining/ymir_mining_cald.py | 8 ++-- 4 files changed, 31 insertions(+), 34 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 12910ea..a809af4 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -26,16 +26,23 @@ def modify_mmcv_config(mmcv_cfg: Config, ymir_cfg: edict) -> None: - modify model output channel - modify epochs, checkpoint, tensorboard config """ - def recursive_modify(mmcv_cfg: Config, attribute_key: str, attribute_value: Any): + def recursive_modify_attribute(mmcv_cfg: Config, attribute_key: str, attribute_value: Any): + """ + recursive modify mmcv_cfg: + 1. mmcv_cfg.attribute_key to attribute_value + 2. mmcv_cfg.xxx.xxx.xxx.attribute_key to attribute_value (recursive) + 3. mmcv_cfg.xxx[i].attribute_key to attribute_value (i=0, 1, 2 ...) + 4. 
mmcv_cfg.xxx[i].xxx.xxx[j].attribute_key to attribute_value + """ for key in mmcv_cfg: if key == attribute_key: mmcv_cfg[key] = attribute_value elif isinstance(mmcv_cfg[key], Config): - recursive_modify(mmcv_cfg[key], attribute_key, attribute_value) + recursive_modify_attribute(mmcv_cfg[key], attribute_key, attribute_value) elif isinstance(mmcv_cfg[key], Iterable): for cfg in mmcv_cfg[key]: if isinstance(cfg, Config): - recursive_modify(cfg, attribute_key, attribute_value) + recursive_modify_attribute(cfg, attribute_key, attribute_value) # modify dataset config ymir_ann_files = dict(train=ymir_cfg.ymir.input.training_index_file, @@ -49,8 +56,9 @@ def recursive_modify(mmcv_cfg: Config, attribute_key: str, attribute_value: Any) mmcv_cfg.data.samples_per_gpu = samples_per_gpu mmcv_cfg.data.workers_per_gpu = workers_per_gpu + # modify model output channel num_classes = len(ymir_cfg.param.class_names) - recursive_modify(mmcv_cfg.model, 'num_classes', num_classes) + recursive_modify_attribute(mmcv_cfg.model, 'num_classes', num_classes) for split in ['train', 'val', 'test']: ymir_dataset_cfg = dict(type='YmirDataset', @@ -77,24 +85,9 @@ def recursive_modify(mmcv_cfg: Config, attribute_key: str, attribute_value: Any) else: raise Exception(f'unsupported source dataset type {src_dataset_type}') - # # modify model output channel - # if mmcv_cfg.model.get('bbox_head'): # yolox, yolo, yolof, retinanet, ssd - # mmdet_model_cfg = mmcv_cfg.model.bbox_head - # elif mmcv_cfg.model.get('roi_head'): # Faster-RCNN, fast-rcnn - # mmdet_model_cfg = mmcv_cfg.model.roi_head.bbox_head - # elif mmcv_cfg.model.get('mask_head'): # SOLO - # mmdet_model_cfg = mmcv_cfg.model.mask_head - # else: - # raise Exception('unknown model structure') - - # if mmdet_model_cfg.get('num_classes'): - # mmdet_model_cfg.num_classes = len(ymir_cfg.param.class_names) - # else: - # raise Exception('unknown model structure, no attr num_classes found') - # modify epochs, checkpoint, tensorboard config if ymir_cfg.param.get('max_epochs', None): - mmcv_cfg.runner.max_epochs = ymir_cfg.param.max_epochs + mmcv_cfg.runner.max_epochs = int(ymir_cfg.param.max_epochs) mmcv_cfg.checkpoint_config['out_dir'] = ymir_cfg.ymir.output.models_dir tensorboard_logger = dict(type='TensorboardLoggerHook', log_dir=ymir_cfg.ymir.output.tensorboard_dir) if len(mmcv_cfg.log_config['hooks']) <= 1: @@ -102,9 +95,15 @@ def recursive_modify(mmcv_cfg: Config, attribute_key: str, attribute_value: Any) else: mmcv_cfg.log_config['hooks'][1].update(tensorboard_logger) + # TODO save only the best top-k model weight files. # modify evaluation and interval - interval = max(1, mmcv_cfg.runner.max_epochs // 10) - mmcv_cfg.evaluation.interval = interval + val_interval: int = int(ymir_cfg.param.get('val_interval', 1)) + if val_interval > 0: + val_interval = min(val_interval, mmcv_cfg.runner.max_epochs) + else: + val_interval = max(1, mmcv_cfg.runner.max_epochs // 10) + + mmcv_cfg.evaluation.interval = val_interval mmcv_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') mmcv_cfg.checkpoint_config.interval = mmcv_cfg.evaluation.interval # TODO Whether to evaluating the AP for each class @@ -120,10 +119,8 @@ def recursive_modify(mmcv_cfg: Config, attribute_key: str, attribute_value: Any) cfg_options = ymir_cfg.param.get("cfg_options", '') # auto load offered weight file if not set by user! 
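`recursive_modify_attribute` above is what lets one call rewrite every `num_classes` buried at any depth in an mmcv model config. A plain dict/list re-implementation sketch of the same traversal (illustrative, not the mmcv `Config` API):

```python
from typing import Any

def recursive_set(cfg: Any, key: str, value: Any) -> None:
    # walk nested dicts and sequences, overwriting every occurrence of `key`
    if isinstance(cfg, dict):
        for k in cfg:
            if k == key:
                cfg[k] = value
            else:
                recursive_set(cfg[k], key, value)
    elif isinstance(cfg, (list, tuple)):
        for item in cfg:
            recursive_set(item, key, value)

model = {'bbox_head': {'num_classes': 80},
         'roi_head': {'bbox_head': [{'num_classes': 80}, {'num_classes': 80}]}}
recursive_set(model, 'num_classes', 3)  # every nested head now predicts 3 classes
```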
- if (args_options.find('--resume-from') == -1 and - args_options.find('--load-from') == -1 and - cfg_options.find('load_from') == -1 and - cfg_options.find('resume_from') == -1): # noqa: E129 + if (args_options.find('--resume-from') == -1 and args_options.find('--load-from') == -1 + and cfg_options.find('load_from') == -1 and cfg_options.find('resume_from') == -1): # noqa: E129 weight_file = get_best_weight_file(ymir_cfg) if weight_file: diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index 7a265ac..dcb0ce9 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -7,4 +7,5 @@ config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py' args_options: '' cfg_options: '' metric: 'bbox' +val_interval: 0 # <0 means use auto interval = max(1, max_epochs//10) port: 12345 diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 2ae2845..928c6e1 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -122,7 +122,6 @@ def compute_aldd_score(self, net_output: List[torch.Tensor], net_input_shape: An def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. - # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 device = torch.device('cuda', gpu) ymir_yolov5.to(device) @@ -178,7 +177,6 @@ def main() -> int: if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 torch.cuda.set_device(gpu) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") @@ -186,7 +184,7 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - if WORLD_SIZE > 1: + if LOCAL_RANK != -1: dist.barrier() if RANK in [0, -1]: @@ -200,7 +198,8 @@ def main() -> int: ymir_mining_result.append((img_file, score)) rw.write_mining_result(mining_result=ymir_mining_result) - print(f'rank: {RANK}, start destroy process group') + if LOCAL_RANK != -1: + print(f'rank: {RANK}, start destroy process group') dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py index d84e2f7..06f2542 100644 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -29,7 +29,6 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. 
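The `if LOCAL_RANK != -1: dist.barrier()` guards added in this patch matter because a single-GPU mining run never calls `init_process_group`, so an unguarded `barrier()` would raise. A hedged sketch of the pattern (the helper name is illustrative):

```python
import os
import torch.distributed as dist

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # -1: not launched by torch.distributed

def sync_all_ranks() -> None:
    # rendezvous point under DDP, no-op otherwise
    if LOCAL_RANK != -1 and dist.is_initialized():
        dist.barrier()
```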
- # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 device = torch.device('cuda', gpu) ymir_yolov5.to(device) @@ -159,7 +158,6 @@ def main() -> int: if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - # gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 torch.cuda.set_device(gpu) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") @@ -167,7 +165,8 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - dist.barrier() + if LOCAL_RANK != -1: + dist.barrier() if RANK in [0, -1]: results = [] @@ -180,7 +179,8 @@ def main() -> int: ymir_mining_result.append((img_file, score)) rw.write_mining_result(mining_result=ymir_mining_result) - print(f'rank: {RANK}, start destroy process group') + if LOCAL_RANK != -1: + print(f'rank: {RANK}, start destroy process group') dist.destroy_process_group() return 0 From 680147bb2f0a77c75ffe424b364e2143c40d2518 Mon Sep 17 00:00:00 2001 From: LuciferZap <92283801+LuciferZap@users.noreply.github.com> Date: Fri, 16 Sep 2022 15:34:02 +0800 Subject: [PATCH 129/204] use imagesize to get img w and h instead of read img --- det-yolov4-tmi/convert_label_ark2txt.py | 7 ++----- det-yolov4-tmi/cuda101.dockerfile | 2 +- det-yolov4-tmi/cuda112.dockerfile | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/det-yolov4-tmi/convert_label_ark2txt.py b/det-yolov4-tmi/convert_label_ark2txt.py index ae54b63..2e963f7 100755 --- a/det-yolov4-tmi/convert_label_ark2txt.py +++ b/det-yolov4-tmi/convert_label_ark2txt.py @@ -1,6 +1,6 @@ import os +import imagesize -import cv2 def _annotation_path_for_image(image_path: str, annotations_dir: str) -> str: @@ -30,10 +30,7 @@ def _convert_annotations(index_file_path: str, dst_annotations_dir: str) -> None # each_txtfile: annotation path each_imgpath, each_txtfile = each_img_anno_path.split() - img = cv2.imread(each_imgpath) - if img is None: - raise ValueError(f"can not read image: {each_imgpath}") - img_h, img_w, _ = img.shape + img_w, img_h = imagesize.get(each_imgpath) with open(each_txtfile, 'r') as f: txt_content = f.readlines() diff --git a/det-yolov4-tmi/cuda101.dockerfile b/det-yolov4-tmi/cuda101.dockerfile index 53aa01b..66273c3 100644 --- a/det-yolov4-tmi/cuda101.dockerfile +++ b/det-yolov4-tmi/cuda101.dockerfile @@ -15,7 +15,7 @@ RUN wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_o RUN rm /usr/bin/python3 RUN ln -s /usr/bin/python3.7 /usr/bin/python3 RUN python3 get-pip.py -RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six scipy tqdm +RUN pip3 install -i ${PIP_SOURCE} mxnet-cu101==1.5.1 numpy opencv-python pyyaml watchdog tensorboardX six scipy tqdm imagesize ENV DEBIAN_FRONTEND noninteractive RUN apt-get update && apt-get install -y libopencv-dev diff --git a/det-yolov4-tmi/cuda112.dockerfile b/det-yolov4-tmi/cuda112.dockerfile index aac49de..bab5c7d 100644 --- a/det-yolov4-tmi/cuda112.dockerfile +++ b/det-yolov4-tmi/cuda112.dockerfile @@ -15,7 +15,7 @@ RUN wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_o RUN rm /usr/bin/python3 RUN ln -s /usr/bin/python3.7 /usr/bin/python3 RUN python3 get-pip.py -RUN pip3 install -i ${PIP_SOURCE} mxnet-cu112==1.9.1 numpy opencv-python pyyaml watchdog tensorboardX six scipy tqdm +RUN pip3 install -i ${PIP_SOURCE} mxnet-cu112==1.9.1 numpy opencv-python 
pyyaml watchdog tensorboardX six scipy tqdm imagesize ENV DEBIAN_FRONTEND noninteractive RUN apt-get update && apt-get install -y libopencv-dev From 512194c471439544326c357484165136b3ce32fa Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 16 Sep 2022 17:14:45 +0800 Subject: [PATCH 130/204] save topk checkpoint weight files --- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 48 ++++++++++++++++++-- det-mmdetection-tmi/training-template.yaml | 3 +- det-yolov5-tmi/README.md | 1 + det-yolov5-tmi/models/common.py | 23 +++++++++- det-yolov5-tmi/models/experimental.py | 14 +++++- det-yolov5-tmi/start.py | 12 +++-- 6 files changed, 88 insertions(+), 13 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index a809af4..515c22a 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -14,6 +14,7 @@ from nptyping import NDArray, Shape, UInt8 from packaging.version import Version from ymir_exc import result_writer as rw +from ymir_exc.util import get_merged_config BBOX = NDArray[Shape['*,4'], Any] CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] @@ -101,11 +102,17 @@ def recursive_modify_attribute(mmcv_cfg: Config, attribute_key: str, attribute_v if val_interval > 0: val_interval = min(val_interval, mmcv_cfg.runner.max_epochs) else: - val_interval = max(1, mmcv_cfg.runner.max_epochs // 10) + val_interval = 1 mmcv_cfg.evaluation.interval = val_interval mmcv_cfg.evaluation.metric = ymir_cfg.param.get('metric', 'bbox') + + # save best top-k model weights files + # max_keep_ckpts <= 0 # save all checkpoints + max_keep_ckpts: int = int(ymir_cfg.param.get('max_keep_checkpoints', 1)) mmcv_cfg.checkpoint_config.interval = mmcv_cfg.evaluation.interval + mmcv_cfg.checkpoint_config.max_keep_ckpts = max_keep_ckpts + # TODO Whether to evaluating the AP for each class # mmdet_cfg.evaluation.classwise = True @@ -189,6 +196,30 @@ def write_ymir_training_result(last: bool = False, key_score: Optional[float] = _write_ancient_ymir_training_result(key_score) +def get_topk_checkpoints(files: List[str], k: int) -> List[str]: + """ + keep topk checkpoint files, remove other files. 
+ """ + checkpoints_files = [f for f in files if f.endswith(('.pth', '.pt'))] + + best_pth_files = [f for f in checkpoints_files if osp.basename(f).startswith('best_')] + if len(best_pth_files) > 0: + # newest first + topk_best_pth_files = sorted(best_pth_files, key=os.path.getctime, reverse=True) + else: + topk_best_pth_files = [] + + epoch_pth_files = [f for f in checkpoints_files if osp.basename(f).startswith(('epoch_', 'iter_'))] + if len(epoch_pth_files) > 0: + topk_epoch_pth_files = sorted(epoch_pth_files, key=os.path.getctime, reverse=True) + else: + topk_epoch_pth_files = [] + + # python will check the length of list + return topk_best_pth_files[0:k] + topk_epoch_pth_files[0:k] + + +# TODO save topk checkpoints, fix invalid stage due to delete checkpoint def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[float] = None): if key_score: logging.info(f'key_score is {key_score}') @@ -209,6 +240,11 @@ def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[f if last: # save all output file + ymir_cfg = get_merged_config() + max_keep_checkpoints = int(ymir_cfg.param.get('max_keep_checkpoints', 1)) + if max_keep_checkpoints > 0: + topk_checkpoints = get_topk_checkpoints(result_files, max_keep_checkpoints) + result_files = [f for f in result_files if not f.endswith(('.pth', '.pt'))] + topk_checkpoints rw.write_model_stage(files=result_files, mAP=float(map), stage_name='last') else: # save newest weight file in format epoch_xxx.pth or iter_xxx.pth @@ -245,13 +281,17 @@ def _write_ancient_ymir_training_result(key_score: Optional[float] = None): # eval_result may be empty dict {}. map = eval_result.get('bbox_mAP_50', 0) - WORK_DIR = os.getenv('YMIR_MODELS_DIR') - if WORK_DIR is None or not osp.isdir(WORK_DIR): - raise Exception(f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') + ymir_cfg = get_merged_config() + WORK_DIR = ymir_cfg.ymir.output.models_dir # assert only one model config file in work_dir result_files = [osp.basename(f) for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] + max_keep_checkpoints = int(ymir_cfg.param.get('max_keep_checkpoints', 1)) + if max_keep_checkpoints > 0: + topk_checkpoints = get_topk_checkpoints(result_files, max_keep_checkpoints) + result_files = [f for f in result_files if not f.endswith(('.pth', '.pt'))] + topk_checkpoints + training_result_file = osp.join(WORK_DIR, 'result.yaml') if osp.exists(training_result_file): with open(training_result_file, 'r') as f: diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index dcb0ce9..902f435 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -7,5 +7,6 @@ config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py' args_options: '' cfg_options: '' metric: 'bbox' -val_interval: 0 # <0 means use auto interval = max(1, max_epochs//10) +val_interval: 1 # <0 means evaluation every interval +max_keep_checkpoints: 1 # <0 means save all weight file, 1 means save last and best weight files, k means save topk best weight files and topk epoch/step weigth files port: 12345 diff --git a/det-yolov5-tmi/README.md b/det-yolov5-tmi/README.md index bc1d11a..c2ad3c2 100644 --- a/det-yolov5-tmi/README.md +++ b/det-yolov5-tmi/README.md @@ -39,3 +39,4 @@ docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERV - 2022/09/08: add aldd active learning algorithm for mining task. 
[Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning) - 2022/09/14: support change hyper-parameter `num_workers_per_gpu` +- 2022/09/16: support change activation, view [rknn](https://github.com/airockchip/rknn_model_zoo/tree/main/models/vision/object_detection/yolov5-pytorch) diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py index d116aa5..b7b6d16 100644 --- a/det-yolov5-tmi/models/common.py +++ b/det-yolov5-tmi/models/common.py @@ -3,6 +3,7 @@ Common modules """ +import os import json import math import platform @@ -41,7 +42,17 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = nn.Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + activation = os.environ.get('ACTIVATION', None) + if activation is None: + self.act = nn.Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + else: + if activation.lower() == 'relu': + custom_act = nn.ReLU() + else: + warnings.warn(f'unknown activation {activation}, use Hardswish instead') + custom_act = nn.Hardswish() + self.act = custom_act if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) def forward(self, x): return self.act(self.bn(self.conv(x))) @@ -115,7 +126,15 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.SiLU() + activation = os.environ.get('ACTIVATION', None) + if activation is None: + self.act = nn.SiLU() + else: + if activation.lower() == 'relu': + self.act = nn.ReLU() + else: + warnings.warn(f'unknown activation {activation}, use SiLU instead') + self.act = nn.SiLU() self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): diff --git a/det-yolov5-tmi/models/experimental.py b/det-yolov5-tmi/models/experimental.py index 463e551..dbfecbf 100644 --- a/det-yolov5-tmi/models/experimental.py +++ b/det-yolov5-tmi/models/experimental.py @@ -2,6 +2,7 @@ """ Experimental modules """ +import os import math import numpy as np @@ -10,6 +11,7 @@ from models.common import Conv from utils.downloads import attempt_download +import warnings class CrossConv(nn.Module): @@ -59,14 +61,22 @@ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kern b = [c2] + [0] * n a = np.eye(n + 1, n, k=-1) a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 + a *= np.array(k)**2 a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b self.m = nn.ModuleList( [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() + activation = os.environ.get('ACTIVATION', None) + if activation is None: + self.act = nn.SiLU() + else: + if activation.lower() == 'relu': + self.act = nn.ReLU() + else: + warnings.warn(f'unknown activation {activation}, use SiLU instead') + self.act = nn.SiLU() def forward(self, x): return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index bd4b537..8cd13b4 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -14,9 +14,7 @@ 
write_ymir_training_result) -def start() -> int: - cfg = get_merged_config() - +def start(cfg: edict) -> int: logging.info(f'merged config: {cfg}') if cfg.ymir.run_training: @@ -187,5 +185,11 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: datefmt='%Y%m%d-%H:%M:%S', level=logging.INFO) + cfg = get_merged_config() os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') - sys.exit(start()) + + # activation: relu + activation: str = cfg.param.get('activation', '') + if activation: + os.environ.setdefault('ACTIVATION', activation) + sys.exit(start(cfg)) From ef09dcf7e2b91e710d5856859864cc388ffc08ad Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 21 Sep 2022 14:29:30 +0800 Subject: [PATCH 131/204] fix 30min dist.barrier() time-out errors --- det-mmdetection-tmi/ymir_mining.py | 4 ++++ det-yolov5-tmi/mining/ymir_infer.py | 4 ++++ det-yolov5-tmi/mining/ymir_mining_aldd.py | 6 +++++- det-yolov5-tmi/mining/ymir_mining_cald.py | 8 ++++++++ 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining.py index 787290e..506506d 100644 --- a/det-mmdetection-tmi/ymir_mining.py +++ b/det-mmdetection-tmi/ymir_mining.py @@ -283,6 +283,10 @@ def mining(self): beta = 1.3 mining_result = [] for asset_path in tbar: + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1: + dist.barrier() + img = cv2.imread(asset_path) # xyxy,conf,cls result = self.predict(img) diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index 258af64..7ac0c4b 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -58,6 +58,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1: + dist.barrier() + with torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 928c6e1..f013584 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -157,6 +157,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader miner = ALDD(ymir_cfg) for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1: + dist.barrier() + with torch.no_grad(): featuremap_output = ymir_yolov5.model.model(batch['image'].float().to(device))[1] unc_scores = miner.compute_aldd_score(featuremap_output, ymir_yolov5.img_size) @@ -200,7 +204,7 @@ def main() -> int: if LOCAL_RANK != -1: print(f'rank: {RANK}, start destroy process group') - dist.destroy_process_group() + dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py index 06f2542..bd5df34 100644 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -62,6 +62,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): dataset_size = len(images_rank) pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1: + dist.barrier() + with 
torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) @@ -98,6 +102,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1: + dist.barrier() + if idx % monitor_gap == 0 and RANK in [-1, 0]: ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) From 20491affaa948114424dbcbbd2dd498d82204b2d Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 22 Sep 2022 13:52:35 +0800 Subject: [PATCH 132/204] update readme --- README.MD | 6 +++--- README_zh-CN.MD | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.MD b/README.MD index 1d834f2..1e06bd9 100644 --- a/README.MD +++ b/README.MD @@ -73,10 +73,10 @@ | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | -| mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | +| mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | vidt | ? | ✔️ | ✔️ | pytorch | ❌ | online | -| nanodet | ✔️ | ✔️ | ❌ | pytorch_lightning | ❌ | online | +| nanodet | ✔️ | ✔️ | ❌ | pytorch_lightning | ❌ | local+online | - `online` pretrained weights may download through network @@ -156,7 +156,7 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . ## reference - [mining algorithm: CALD](https://github.com/we1pingyu/CALD/) -- [mining algorithm: ALDD](https://gitlab.com/haghdam/deep_active_learning) +- [mining algorithm: ALDD](https://gitlab.com/haghdam/deep_active_learning) - [yolov4](https://github.com/AlexeyAB/darknet) - [yolov5](https://github.com/ultralytics/yolov5) - [mmdetection](https://github.com/open-mmlab/mmdetection) diff --git a/README_zh-CN.MD b/README_zh-CN.MD index f22015a..ac6d483 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -73,10 +73,10 @@ | yolov4 | ? | ✔️ | ❌ | darknet + mxnet | ❌ | local | | yolov5 | ✔️ | ✔️ | ✔️ | pytorch | ✔️ | local+online | | yolov7 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | -| mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | +| mmdetection | ✔️ | ✔️ | ✔️ | pytorch | ❌ | local+online | | detectron2 | ✔️ | ✔️ | ✔️ | pytorch | ❌ | online | | vidt | ? 
| ✔️ | ✔️ | pytorch | ❌ | online | -| nanodet | ✔️ | ✔️ | ❌ | pytorch_lightning | ❌ | online | +| nanodet | ✔️ | ✔️ | ❌ | pytorch_lightning | ❌ | local+online | - `online` 预训练权重可能在训练时通过网络下载 From ebc3f24e4e7d64f48e942d9126c2249a4f0ca212 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 26 Sep 2022 16:54:57 +0800 Subject: [PATCH 133/204] update readme --- README.MD | 64 +---------------- README_zh-CN.MD | 70 ++----------------- det-yolov4-tmi/training-template.yaml | 5 +- det-yolov5-tmi/mining/ymir_infer.py | 2 - det-yolov5-tmi/mining/ymir_mining_aldd.py | 3 +- det-yolov5-tmi/mining/ymir_mining_cald.py | 3 +- det-yolov5-tmi/train.py | 7 +- det-yolov5-tmi/utils/ymir_yolov5.py | 69 ++---------------- docs/ymir-executor-version.md | 19 +++++ .../img-man/training-template.yaml | 2 + official-docker-image.md | 61 ++++++++++++++++ 11 files changed, 104 insertions(+), 201 deletions(-) create mode 100644 docs/ymir-executor-version.md create mode 100644 official-docker-image.md diff --git a/README.MD b/README.MD index 1e06bd9..e326aeb 100644 --- a/README.MD +++ b/README.MD @@ -4,67 +4,7 @@ - [wiki](https://github.com/modelai/ymir-executor-fork/wiki) -## ymir-1.1.0 official image - -- [yolov4](https://github.com/modelai/ymir-executor-fork#det-yolov4-training) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi - - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi - ``` - -- [yolov5](https://github.com/modelai/ymir-executor-fork#det-yolov5-tmi) - - - [change log](./det-yolov5-tmi/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi - - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi - ``` - -- [mmdetection](https://github.com/modelai/ymir-executor-fork#det-mmdetection-tmi) - - - [change log](./det-mmdetection-tmi/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi - - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi - ``` - -- [detectron2](https://github.com/modelai/ymir-detectron2) - - - [change log](https://github.com/modelai/ymir-detectron2/blob/master/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi - ``` - -- [yolov7](https://github.com/modelai/ymir-yolov7) - - - [change log](https://github.com/modelai/ymir-yolov7/blob/main/ymir/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi - ``` - -- [vidt](https://github.com/modelai/ymir-vidt) - - - [change log](https://github.com/modelai/ymir-vidt/tree/main/ymir) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi - ``` - -- [nanodet](https://github.com/modelai/ymir-nanodet/tree/ymir-dev) - - - [change log](https://github.com/modelai/ymir-nanodet/tree/ymir-dev/ymir) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-nanodet-cu111-tmi - ``` +- [ymir executor](./official-docker-image.md) ## overview @@ -110,6 +50,8 @@ gpu: single GeForce GTX 1080 Ti --- +# build ymir executor + ## det-yolov4-tmi - yolov4 training, mining and infer docker image, use `mxnet` and `darknet` framework diff --git a/README_zh-CN.MD b/README_zh-CN.MD index ac6d483..3ca0c44 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -2,69 +2,9 @@ - [ymir](https://github.com/IndustryEssentials/ymir) -- [wiki](https://github.com/modelai/ymir-executor-fork/wiki) +- [说明文档](https://github.com/modelai/ymir-executor-fork/wiki) -## ymir-1.1.0 官方镜像 - -- [yolov4](https://github.com/modelai/ymir-executor-fork#det-yolov4-training) - - ``` - docker pull 
youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi - - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi - ``` - -- [yolov5](https://github.com/modelai/ymir-executor-fork#det-yolov5-tmi) - - - [change log](./det-yolov5-tmi/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi - - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi - ``` - -- [mmdetection](https://github.com/modelai/ymir-executor-fork#det-mmdetection-tmi) - - - [change log](./det-mmdetection-tmi/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi - - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi - ``` - -- [detectron2](https://github.com/modelai/ymir-detectron2) - - - [change log](https://github.com/modelai/ymir-detectron2/blob/master/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi - ``` - -- [yolov7](https://github.com/modelai/ymir-yolov7) - - - [change log](https://github.com/modelai/ymir-yolov7/blob/main/ymir/README.md) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi - ``` - -- [vidt](https://github.com/modelai/ymir-vidt) - - - [change log](https://github.com/modelai/ymir-vidt/tree/main/ymir) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi - ``` - -- [nanodet](https://github.com/modelai/ymir-nanodet/tree/ymir-dev) - - - [change log](https://github.com/modelai/ymir-nanodet/tree/ymir-dev/ymir) - - ``` - docker pull youdaoyzbx/ymir-executor:ymir1.1.0-nanodet-cu111-tmi - ``` +- [ymir镜像](./official-docker-image.md) ## 比较 @@ -112,6 +52,8 @@ gpu: single GeForce GTX 1080 Ti --- +# 手动构建ymir镜像 + ## det-yolov4-tmi - yolov4的训练、挖掘与推理镜像,采用mxnet与darknet框架 @@ -145,7 +87,7 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . ## live-code-executor -- 可以通过`git_url`, `commit id` 或 `tag` 从网上clone代码到镜像并运行, 不推荐使用`branch`, 因为这样拉取的代码可能随时间变化, 实验结果不具备可重复性. +- 可以通过`git_url`, `commit id` 或 `tag` 从网上clone代码到镜像并运行, 不推荐使用`branch`, 因为这样拉取的代码可能随时间变化, 过程不具备可重复性. 
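The reproducibility point above is why the live-code executor should pin an immutable revision: a tag or commit id always resolves to the same tree, while a branch may move between runs. A minimal sketch of pinned cloning (hypothetical helper and tag, not the executor's actual entrypoint):

```python
import subprocess

def clone_pinned(git_url: str, revision: str, dst: str = '/app/src') -> None:
    # `revision` should be a tag or commit id, not a branch name
    subprocess.run(['git', 'clone', git_url, dst], check=True)
    subprocess.run(['git', '-C', dst, 'checkout', revision], check=True)

clone_pinned('https://github.com/IndustryEssentials/ymir-remote-git', 'v1.0.0')
```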
- 参考 [live-code](https://github.com/IndustryEssentials/ymir-remote-git) @@ -189,7 +131,7 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile ## 关于cuda版本 -- 推荐主机安装11.2以上的cuda版本, 使用11.1及以上的镜像 +- 推荐主机安装高版本驱动,支持11.2以上的cuda版本, 使用11.1及以上的镜像 - GTX3080/GTX3090不支持11.1以下的cuda,只能使用cuda11.1及以上的镜像 diff --git a/det-yolov4-tmi/training-template.yaml b/det-yolov4-tmi/training-template.yaml index 5e75eaf..17810f6 100644 --- a/det-yolov4-tmi/training-template.yaml +++ b/det-yolov4-tmi/training-template.yaml @@ -4,9 +4,10 @@ image_width: 608 learning_rate: 0.0013 max_batches: 20000 warmup_iterations: 1000 -batch: 4 -subdivisions: 32 +batch: 64 +subdivisions: 64 shm_size: '16G' +export_format: 'ark:raw' # class_names: # - cat # gpu_id: '0,1,2,3' diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index 7ac0c4b..7b86684 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -88,8 +88,6 @@ def main() -> int: if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK]) - torch.cuda.set_device(gpu) torch.cuda.set_device(LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index f013584..5397372 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -181,8 +181,7 @@ def main() -> int: if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 - torch.cuda.set_device(gpu) + torch.cuda.set_device(LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") run(ymir_cfg, ymir_yolov5) diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py index bd5df34..63022fc 100644 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -166,8 +166,7 @@ def main() -> int: if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 - torch.cuda.set_device(gpu) + torch.cuda.set_device(LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") run(ymir_cfg, ymir_yolov5) diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 0d208bf..f84f343 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -39,8 +39,6 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process, write_ymir_training_result - import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model @@ -59,6 +57,7 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process, write_ymir_training_result LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -419,7 +418,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear if (epoch > 0) and 
(opt.save_period > 0) and (epoch % opt.save_period == 0): torch.save(ckpt, w / f'epoch{epoch}.pt') weight_file = str(w / f'epoch{epoch}.pt') - write_ymir_training_result(ymir_cfg, map50=results[2], id=str(epoch), files=[weight_file]) + write_ymir_training_result(ymir_cfg, map50=results[2], id=f'epoch_{epoch}', files=[weight_file]) del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) @@ -468,7 +467,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear torch.cuda.empty_cache() # save the best and last weight file with other files in models_dir if RANK in [-1, 0]: - write_ymir_training_result(ymir_cfg, map50=best_fitness, id=str(epochs), files=[]) + write_ymir_training_result(ymir_cfg, map50=best_fitness, id=f'epoch_{epochs}', files=[]) return results diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index 675110c..e2b4c7d 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -171,79 +171,20 @@ def write_monitor_logger(self, stage: YmirStage, p: float): percent=get_ymir_process(stage=stage, p=p, task_idx=self.task_idx, task_num=self.task_num)) -def convert_ymir_to_yolov5(cfg: edict): +def convert_ymir_to_yolov5(cfg: edict, out_dir: str = None): """ convert ymir format dataset to yolov5 format generate data.yaml for training/mining/infer """ - data = dict(path=cfg.ymir.output.root_dir, nc=len(cfg.param.class_names), names=cfg.param.class_names) + out_dir = out_dir or cfg.ymir.output.root_dir + data = dict(path=out_dir, nc=len(cfg.param.class_names), names=cfg.param.class_names) for split, prefix in zip(['train', 'val', 'test'], ['training', 'val', 'candidate']): src_file = getattr(cfg.ymir.input, f'{prefix}_index_file') if osp.exists(src_file): - shutil.copy(src_file, f'{cfg.ymir.output.root_dir}/{split}.tsv') + shutil.copy(src_file, f'{out_dir}/{split}.tsv') data[split] = f'{split}.tsv' - with open(osp.join(cfg.ymir.output.root_dir, 'data.yaml'), 'w') as fw: + with open(osp.join(out_dir, 'data.yaml'), 'w') as fw: fw.write(yaml.safe_dump(data)) - - -def write_ymir_training_result(cfg: edict, map50: float = 0.0, epoch: int = 0, weight_file: str = ""): - YMIR_VERSION = os.getenv('YMIR_VERSION', '1.2.0') - if Version(YMIR_VERSION) >= Version('1.2.0'): - _write_latest_ymir_training_result(cfg, float(map50), epoch, weight_file) - else: - _write_ancient_ymir_training_result(cfg, float(map50)) - - -def _write_latest_ymir_training_result(cfg: edict, map50: float, epoch: int, weight_file: str) -> int: - """ - for ymir>=1.2.0 - cfg: ymir config - map50: map50 - epoch: stage - weight_file: saved weight files, empty weight_file will save all files - - 1. save weight file for each epoch. - 2. save weight file for last.pt, best.pt and other config file - 3. 
save weight file for best.onnx, no valid map50, attach to stage f"{model}_last_and_best"
-    """
-    model = cfg.param.model
-    # use `rw.write_training_result` to save training result
-    if weight_file:
-        rw.write_model_stage(stage_name=f"{model}_{epoch}", files=[osp.basename(weight_file)], mAP=float(map50))
-    else:
-        # save other files with
-        files = [
-            osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*')) if not f.endswith('.pt')
-        ] + ['last.pt', 'best.pt']
-
-        training_result_file = cfg.ymir.output.training_result_file
-        if osp.exists(training_result_file):
-            with open(training_result_file, 'r') as f:
-                training_result = yaml.safe_load(stream=f)
-
-            map50 = max(training_result.get('map', 0.0), map50)
-        rw.write_model_stage(stage_name=f"{model}_last_and_best", files=files, mAP=float(map50))
-    return 0
-
-
-def _write_ancient_ymir_training_result(cfg: edict, map50: float) -> None:
-    """
-    for 1.0.0 <= ymir <= 1.1.0
-    """
-
-    files = [osp.basename(f) for f in glob.glob(osp.join(cfg.ymir.output.models_dir, '*'))]
-    training_result_file = cfg.ymir.output.training_result_file
-    if osp.exists(training_result_file):
-        with open(training_result_file, 'r') as f:
-            training_result = yaml.safe_load(stream=f)
-
-        training_result['model'] = files
-        training_result['map'] = max(float(training_result.get('map', 0)), map50)
-    else:
-        training_result = {'model': files, 'map': float(map50), 'stage_name': cfg.param.model}
-
-    with open(training_result_file, 'w') as f:
-        yaml.safe_dump(training_result, f)
diff --git a/docs/ymir-executor-version.md b/docs/ymir-executor-version.md
new file mode 100644
index 0000000..247ee13
--- /dev/null
+++ b/docs/ymir-executor-version.md
@@ -0,0 +1,19 @@
+# ymir1.3.0 (2022-09-30)
+
+- Support saving model weights stage by stage, so users can run inference with epoch10.pth or with epoch20.pth
+
+- Training images must declare the dataset annotation format; the ymir1.1.0 default is `ark:raw`
+
+- Training images can read the ymir version of the host system, which makes compatibility handling easier (see the sketch at the end of this page)
+
+## Helper libraries
+
+- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk), use the ymir1.3.0 branch
+
+- [ymir-executor-verifier]() image verification tool
+
+# ymir1.1.0
+
+- [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-dataset-zh-CN.md)
+
+- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk), use the ymir1.0.0 branch
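+
+A minimal sketch of how an executor can branch on the platform version. It assumes the platform exports a `YMIR_VERSION` environment variable, with `1.1.0` as a fallback; adjust both to your deployment:
+
+```python
+import os
+
+from packaging.version import Version
+
+# assumed to be set by the ymir platform when it launches the container
+ymir_version = os.getenv('YMIR_VERSION', '1.1.0')
+
+if Version(ymir_version) >= Version('1.2.0'):
+    ...  # save one model stage per epoch, e.g. rw.write_model_stage(...)
+else:
+    ...  # 1.0.0 <= ymir <= 1.1.0: write a single training result file
+```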
diff --git a/live-code-executor/img-man/training-template.yaml b/live-code-executor/img-man/training-template.yaml
index 865b40b..0ac8798 100644
--- a/live-code-executor/img-man/training-template.yaml
+++ b/live-code-executor/img-man/training-template.yaml
@@ -6,3 +6,5 @@ gpu_id: '0'
 task_id: 'default-training-task'
 pretrained_model_params: []
 class_names: []
+export_format: 'ark:raw'
+shm_size: '32G'
diff --git a/official-docker-image.md b/official-docker-image.md
new file mode 100644
index 0000000..a01a91a
--- /dev/null
+++ b/official-docker-image.md
@@ -0,0 +1,61 @@
+# official docker image
+
+- [yolov4](https://github.com/modelai/ymir-executor-fork#det-yolov4-training)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu112-tmi
+
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov4-cu101-tmi
+  ```
+
+- [yolov5](https://github.com/modelai/ymir-executor-fork#det-yolov5-tmi)
+
+  - [change log](./det-yolov5-tmi/README.md)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi
+
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu102-tmi
+  ```
+
+- [mmdetection](https://github.com/modelai/ymir-executor-fork#det-mmdetection-tmi)
+
+  - [change log](./det-mmdetection-tmi/README.md)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi
+
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu102-tmi
+  ```
+
+- [detectron2](https://github.com/modelai/ymir-detectron2)
+
+  - [change log](https://github.com/modelai/ymir-detectron2/blob/master/README.md)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-detectron2-cu111-tmi
+  ```
+
+- [yolov7](https://github.com/modelai/ymir-yolov7)
+
+  - [change log](https://github.com/modelai/ymir-yolov7/blob/main/ymir/README.md)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov7-cu111-tmi
+  ```
+
+- [vidt](https://github.com/modelai/ymir-vidt)
+
+  - [change log](https://github.com/modelai/ymir-vidt/tree/main/ymir)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-vidt-cu111-tmi
+  ```
+
+- [nanodet](https://github.com/modelai/ymir-nanodet/tree/ymir-dev)
+
+  - [change log](https://github.com/modelai/ymir-nanodet/tree/ymir-dev/ymir)
+
+  ```
+  docker pull youdaoyzbx/ymir-executor:ymir1.1.0-nanodet-cu111-tmi
+  ```

From 92f4c4bdf6a65d3f2bd73a6ce3734af191ab5ed1 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 28 Sep 2022 15:34:47 +0800
Subject: [PATCH 134/204] update infer process

---
 det-yolov5-tmi/start.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py
index 8cd13b4..daaaebb 100644
--- a/det-yolov5-tmi/start.py
+++ b/det-yolov5-tmi/start.py
@@ -176,7 +176,7 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None:
     subprocess.run(command.split(), check=True)

     monitor.write_monitor_logger(
-        percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num))
+        percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num))


 if __name__ == '__main__':

From 122862f537b4e66125373cbe594d79ce2a17b4bd Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 12 Oct 2022 17:10:20 +0800
Subject: [PATCH 135/204] fix barrier

---
 det-yolov5-tmi/README.md                  |  1 +
 det-yolov5-tmi/mining/data_augment.py     | 19 ++++++---
 det-yolov5-tmi/mining/mining_cald.py      | 17 ++++----
 det-yolov5-tmi/mining/util.py             |  5 ++-
 det-yolov5-tmi/mining/ymir_infer.py       | 15 +++++---
 det-yolov5-tmi/mining/ymir_mining_aldd.py | 15 ++++++--
 det-yolov5-tmi/mining/ymir_mining_cald.py | 17 ++++----
 det-yolov5-tmi/models/common.py           | 47 +++++++++++++++--------
 det-yolov5-tmi/start.py                   |  5 ++-
 det-yolov5-tmi/train.py                   |  3 +-
 det-yolov5-tmi/utils/ymir_yolov5.py       |  9 +++--
 11 files changed, 94 insertions(+), 59 deletions(-)

diff --git a/det-yolov5-tmi/README.md b/det-yolov5-tmi/README.md
index c2ad3c2..6bf9151 100644
--- a/det-yolov5-tmi/README.md
+++ b/det-yolov5-tmi/README.md
@@ -40,3 +40,4 @@ docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERV
 - 2022/09/08: add the aldd active learning algorithm for the mining task: [Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning)
 - 2022/09/14: support changing the hyper-parameter `num_workers_per_gpu`
 - 2022/09/16: support changing the activation function, see [rknn](https://github.com/airockchip/rknn_model_zoo/tree/main/models/vision/object_detection/yolov5-pytorch)
+- 2022/10/09: fix the dist.destroy_process_group() hang (see the sketch below)
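+
+  The fix gates `dist.barrier()` so that it only runs for batches every rank is guaranteed to reach. A minimal sketch of the pattern (simplified from `mining/ymir_infer.py`; `images`, `loader`, `WORLD_SIZE`, `LOCAL_RANK` and `batch_size_per_gpu` are defined as in that script):
+
+  ```python
+  import torch.distributed as dist
+
+  # each rank handles at most len(images) // WORLD_SIZE images, so only
+  # this many batches are guaranteed to exist on every rank at once
+  max_barrier_times = len(images) // max(1, WORLD_SIZE) // batch_size_per_gpu
+
+  for idx, batch in enumerate(loader):
+      # batch-level sync avoids the 30-minute NCCL time-out, but a barrier
+      # on a batch some rank never reaches would dead-lock, hence the guard
+      if LOCAL_RANK != -1 and idx < max_barrier_times:
+          dist.barrier()
+  ```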
diff --git a/det-yolov5-tmi/mining/data_augment.py b/det-yolov5-tmi/mining/data_augment.py
index 42af914..cfafaa7 100644
--- a/det-yolov5-tmi/mining/data_augment.py
+++ b/det-yolov5-tmi/mining/data_augment.py
@@ -8,6 +8,7 @@
 import cv2
 import numpy as np
 from nptyping import NDArray
+
 from utils.ymir_yolov5 import BBOX, CV_IMAGE

@@ -22,11 +23,13 @@ def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray:
     '''
     n1 = boxes1.shape[0]
     n2 = boxes2.shape[0]
-    max_xy = np.minimum(np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1),
-                        np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0))
+    max_xy = np.minimum(
+        np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1),
+        np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0))

-    min_xy = np.maximum(np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1),
-                        np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0))
+    min_xy = np.maximum(
+        np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1),
+        np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0))
     inter = np.clip(max_xy - min_xy, a_min=0, a_max=None)  # (n1, n2, 2)
     return inter[:, :, 0] * inter[:, :, 1]  # (n1, n2)
@@ -49,8 +52,12 @@ def horizontal_flip(image: CV_IMAGE, bbox: BBOX) \
     return image, bbox


-def cutout(image: CV_IMAGE, bbox: BBOX, cut_num: int = 2, fill_val: int = 0,
-           bbox_remove_thres: float = 0.4, bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]:
+def cutout(image: CV_IMAGE,
+           bbox: BBOX,
+           cut_num: int = 2,
+           fill_val: int = 0,
+           bbox_remove_thres: float = 0.4,
+           bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]:
     '''
     Cutout augmentation
     image: A PIL image
diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py
index 1588665..ab458ff 100644
--- a/det-yolov5-tmi/mining/mining_cald.py
+++ b/det-yolov5-tmi/mining/mining_cald.py
@@ -8,16 +8,17 @@
 import cv2
 import numpy as np
 from easydict import EasyDict as edict
-from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate
 from nptyping import NDArray
 from scipy.stats import entropy
 from tqdm import tqdm
-from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5
 from ymir_exc import dataset_reader as dr
 from ymir_exc import env, monitor
 from ymir_exc import result_writer as rw
 from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process

+from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate
+from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5
+

 def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]:
     if len(result) > 0:
@@ -33,6 +34,7 @@ def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]:

 class MiningCald(YmirYolov5):
+
     def __init__(self, cfg: edict):
         super().__init__(cfg)

@@ -101,8 +103,10 @@ def mining(self) -> List:
             idx += 1

             if idx % monitor_gap == 0:
-                percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N,
-                                           task_idx=self.task_idx, task_num=self.task_num)
+                percent = get_ymir_process(stage=YmirStage.TASK,
+                                           p=idx / N,
+                                           task_idx=self.task_idx,
+                                           task_num=self.task_num)
                 monitor.write_monitor_logger(percent=percent)

         return mining_result
@@ -114,10 +118,7 @@ def aug_predict(self, image: CV_IMAGE, bboxes: 
BBOX) -> Tuple[Dict[str, BBOX], D return the predict result and augment bbox. """ - aug_dict = dict(flip=horizontal_flip, - cutout=cutout, - rotate=rotate, - resize=resize) + aug_dict = dict(flip=horizontal_flip, cutout=cutout, rotate=rotate, resize=resize) aug_bboxes = dict() aug_results = dict() diff --git a/det-yolov5-tmi/mining/util.py b/det-yolov5-tmi/mining/util.py index 5c9b669..54ef5dd 100644 --- a/det-yolov5-tmi/mining/util.py +++ b/det-yolov5-tmi/mining/util.py @@ -19,10 +19,11 @@ import cv2 import numpy as np import torch.utils.data as td -from mining.data_augment import cutout, horizontal_flip, resize, rotate -from mining.mining_cald import get_ious from scipy.stats import entropy from torch.utils.data._utils.collate import default_collate + +from mining.data_augment import cutout, horizontal_flip, resize, rotate +from mining.mining_cald import get_ious from utils.augmentations import letterbox LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index 7b86684..61d305f 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -12,13 +12,14 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file from tqdm import tqdm -from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config +from mining.util import YmirDataset, load_image_file +from utils.general import scale_coords +from utils.ymir_yolov5 import YmirYolov5 + LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -42,6 +43,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] + max_barrier_times = len(images) // max(1, WORLD_SIZE) // batch_size_per_gpu # origin dataset images_rank = images[RANK::WORLD_SIZE] origin_dataset = YmirDataset(images_rank, load_fn=load_fn) @@ -59,7 +61,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1: + if LOCAL_RANK != -1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): @@ -123,8 +125,9 @@ def main() -> int: ymir_infer_result[img_file] = anns rw.write_infer_result(infer_result=ymir_infer_result) - print(f'rank: {RANK}, start destroy process group') - dist.destroy_process_group() + if LOCAL_RANK != -1: + print(f'rank: {RANK}, start destroy process group') + # dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 5397372..8151a1b 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -17,18 +17,20 @@ import torch.nn.functional as F import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config +from mining.util import YmirDataset, 
load_image_file +from utils.ymir_yolov5 import YmirYolov5 + LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) class ALDD(object): + def __init__(self, ymir_cfg: edict): self.avg_pool_size = 9 self.max_pool_size = 32 @@ -138,6 +140,8 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] + max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu + # origin dataset if RANK != -1: images_rank = images[RANK::WORLD_SIZE] @@ -158,7 +162,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): miner = ALDD(ymir_cfg) for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1: + if LOCAL_RANK != -1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): @@ -188,7 +192,9 @@ def main() -> int: # wait all process to save the mining result if LOCAL_RANK != -1: + print(f'rank: {RANK}, sync start before merge') dist.barrier() + print(f'rank: {RANK}, sync finished before merge') if RANK in [0, -1]: results = [] @@ -203,7 +209,8 @@ def main() -> int: if LOCAL_RANK != -1: print(f'rank: {RANK}, start destroy process group') - dist.destroy_process_group() + # dist.destroy_process_group() + print(f'rank: {RANK}, finished destroy process group') return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py index 63022fc..343a501 100644 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -14,13 +14,14 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict +from tqdm import tqdm +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, update_consistency) -from tqdm import tqdm from utils.general import scale_coords from utils.ymir_yolov5 import YmirYolov5 -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -45,6 +46,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] + max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu # origin dataset images_rank = images[RANK::WORLD_SIZE] origin_dataset = YmirDataset(images_rank, load_fn=load_fn) @@ -63,7 +65,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1: + if LOCAL_RANK != -1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): @@ -98,14 +100,11 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pin_memory=ymir_yolov5.pin_memory, drop_last=False) + # cannot sync here!!! 
dataset_size = len(results) monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader for idx, batch in enumerate(pbar): - # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1: - dist.barrier() - if idx % monitor_gap == 0 and RANK in [-1, 0]: ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) @@ -188,7 +187,7 @@ def main() -> int: if LOCAL_RANK != -1: print(f'rank: {RANK}, start destroy process group') - dist.destroy_process_group() + # dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py index b7b6d16..35bbc69 100644 --- a/det-yolov5-tmi/models/common.py +++ b/det-yolov5-tmi/models/common.py @@ -3,9 +3,9 @@ Common modules """ -import os import json import math +import os import platform import warnings from collections import OrderedDict, namedtuple @@ -246,11 +246,12 @@ class GhostBottleneck(nn.Module): def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride super().__init__() c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + self.conv = nn.Sequential( + GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, + act=False)) if s == 2 else nn.Identity() def forward(self, x): return self.conv(x) + self.shortcut(x) @@ -279,9 +280,9 @@ def __init__(self, gain=2): def forward(self, x): b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' s = self.gain - x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) + x = x.view(b, s, s, c // s**2, h, w) # x(1,2,2,16,80,80) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) + return x.view(b, c // s**2, h * s, w * s) # x(1,16,160,160) class Concat(nn.Module): @@ -334,7 +335,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements(('opencv-python>=4.5.4',)) + check_requirements(('opencv-python>=4.5.4', )) net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') @@ -345,7 +346,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): session = onnxruntime.InferenceSession(w, providers=providers) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements(('openvino-dev', )) # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie core = ie.IECore() if not Path(w).is_file(): # if not *.xml @@ -400,9 +401,11 @@ def wrap_frozen_graph(gd, inputs, outputs): Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU 
inference...') - delegate = {'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll' + }[platform.system()] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') @@ -573,8 +576,13 @@ def forward(self, imgs, size=640, augment=False, profile=False): t.append(time_sync()) # Post-process - y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, - agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + iou_thres=self.iou, + classes=self.classes, + agnostic=self.agnostic, + multi_label=self.multi_label, + max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) @@ -615,8 +623,13 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False label = f'{self.names[int(cls)]} {conf:.2f}' if crop: file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None - crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, - 'im': save_one_box(box, im, file=file, save=save)}) + crops.append({ + 'box': box, + 'conf': conf, + 'cls': cls, + 'label': label, + 'im': save_one_box(box, im, file=file, save=save) + }) else: # all others annotator.box_label(box, label, color=colors(cls)) im = annotator.im diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index daaaebb..c250745 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -5,14 +5,15 @@ import cv2 from easydict import EasyDict as edict -from models.experimental import attempt_download -from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw from ymir_exc.util import (YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process, write_ymir_training_result) +from models.experimental import attempt_download +from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file + def start(cfg: edict) -> int: logging.info(f'merged config: {cfg}') diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index f84f343..6b5e8ee 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -39,6 +39,8 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process, write_ymir_training_result + import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model @@ -57,7 +59,6 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process, write_ymir_training_result LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index e2b4c7d..e58c81d 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -11,16 +11,17 @@ 
import torch
 import yaml
 from easydict import EasyDict as edict
-from models.common import DetectMultiBackend
 from nptyping import NDArray, Shape, UInt8
 from packaging.version import Version
-from utils.augmentations import letterbox
-from utils.general import check_img_size, non_max_suppression, scale_coords
-from utils.torch_utils import select_device
 from ymir_exc import monitor
 from ymir_exc import result_writer as rw
 from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process

+from models.common import DetectMultiBackend
+from utils.augmentations import letterbox
+from utils.general import check_img_size, non_max_suppression, scale_coords
+from utils.torch_utils import select_device
+
 BBOX = NDArray[Shape['*,4'], Any]
 CV_IMAGE = NDArray[Shape['*,*,3'], UInt8]

From bb0b572722c95716bd6552f4ce3a2efcbb11c4c7 Mon Sep 17 00:00:00 2001
From: 王佳欣
Date: Thu, 13 Oct 2022 13:48:24 +0800
Subject: [PATCH 136/204] Add files via upload

---
 docs/ymir-docker-develop.drawio.png | Bin 0 -> 56330 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/ymir-docker-develop.drawio.png

diff --git a/docs/ymir-docker-develop.drawio.png b/docs/ymir-docker-develop.drawio.png
new file mode 100644
index 0000000000000000000000000000000000000000..706a95e4e3d9e5a1ac665e4d73163aa49bc002ae
GIT binary patch
literal 56330
[56330 bytes of base85-encoded PNG data omitted: the ymir docker development workflow diagram]
zQ$a(pWy%^_Kb8RjdFKSNc;+YMw+QD0K8-Ka3#32Ci#_krw~u-$b~q|eK2rBg4I$7D z(j&~niIoe(faG}}e8nyUIXeDt2yOyDDEY)1dZkYN0^8^NnPk%b`LwQ}S`jO?wVb%`Urbe)?FI_o7&yxB zksh?nK&-1u71GGsWwPuL+JqsXUH{Z|GJtfT=d)MHd2rfv?+q2DHnH1)V3Fr*9|H#y)Z7>V`ni-iP}P~3uN zn>%0l5YW_ebd8)OASl1FJTMP+x{+9)lzTXi4vk`dGb^YqfCVcw#Z^Jm41d0`sNQJn zeVhPDje5>1xa<gEBM<39R@>2#DRA*NKI{E6s zj%dSaEfw`^+vtI?$N?b|@CozaLpma8vR-3_a|*h+s(^X&{}lwo$)#O`vhz*4p4m_+`Jua3TISX@b# z(}wz+$6N~5NAu9G0LX{D?7EbyhP_LQ5+T`q66ZLoY&2PeocdL-i<=J2^Tn#hJX9w> zmzcAoZm>tS8r}DxTFsOXTMZylqm!5>R=2{&1x_Hru%4o>!YHrx(y2@8Xk(CGL9$)DnIlQP zgd;#92u#Wi=vV*^V_9HjDfu>crz;xpMPkAbo4bF5;gul>?z;)_CvhFe!fdSr*S)sF ztyLc`*?TgRX;_^9=*o5En35OnB4PK7CV&n|UYfEgv)^*>d3eio=Y1<2=B{8wCQK}J z{5_0IGuC5C*eE}c^^q?)o!WxW4K?bLf&sCE>RMz=Xc7Su8z!7rqhE+Ly zYh5^}n~mf0y<`{I9}em0Z=&JiDES1>{|v+ymE~+cedUJCivMwAMKQn7fIC;+^(Eqy zKvbssB#1^kH^_eQBsNFAx@|WW!JCt~=O)|?7FGKeu20JGw(0k8|8SQqmtY+_gBbow zZ5f`JNU|W2F#w~7VXp};uA##a_n`M1EQn}qzD`EmG_A6od^hUanFR7-aGfdBI8n;G zMrh+0o8MqfS>#0exZxQ`{I#TN;_g|Ik1s2jv= zY_#!kv=vA5L(0}vL7p~tSs6w+1%CGvleEpQJ%4_P4K4-4--DF+vwMhDOBhoRwm268 zY{=Q?K|eR*&C72F^^RGPHn_Vy#MlDZ!!ucWR zs4H>u>~MbQz}Va1YiL02!-4T>=-!_ZFP2bK%pd917a_$|hAOiXm6nStH%MSL1dSe^ zf)NsA2!ELejR4GVfd*GT%WtXpn?l>jSFX-|geBC7!EH4uS zEt|B}Z@u2hfR0tU%KGKTUbgOkiDo{TDTIzA6#VrMy$jzdA+0^tr zwysX=konH=Y4Q1=B!x^}w-8c4rr3zKevR)P7YqasQ!S}bkB1m5h{Z70-DVn}nA5|u z7xZc=j`n<@0%^{eqHgD=s8G@sc4y}0r*O&>va2dRyPZ9X1#4a8xT=QcGc)^i{J7GOTdML46M(AS}MLxi9r$Oxs9Oh`Z*{S zh7$#LIzt*y5391RBM(6k#_D!1BMmOt1&L_lOWy?lm~{yYtCu4B zA89k|AGCQ@#c%-!FTT=a8*7acW$5XiC;JV!FtbHUIjHupx~eiTQ5bq)o;mGbdEr&? z7}+jRwmJCPX=AcL!}d7WTe$oeh)$Fs>0!}S&p3Ip#n^OhV(<23rfuT0X4<18TVmBG z-`Jd<)y?A5iX_-jdFdgV(en6LxTX3Q8MAC>KQ?3e057N}#C|2~dl$q~fAQ;-&;-eX zyM9u*goZcN%!%fME|yy?)kdZSC`PwWP?6gxabA>n%a<9;_k=*^ zN-uSc8D@P9P|u5<#fCk4JE?Sk2o!A-713|MP=G}__P`lH>da`@MQ;Yt4+FM)NZM)1 zC&^x(EfMOsKzV8ea&no$zU}c1k+Bi*;fc|~89F{B58!!jV2zQkw~Bz&qm3nZ)RMPH z*w#CBf`NMX8LjxA;x&uXbxke_FJ5j8>*lBFsNz#@+u(R*cm;;$CMb08HVUBJGoMtg z5$bY9uhw{6DdO#wcz+$S>el8b=Sw=>_lFgEP@tN5vheA>jjyK71N7}FYsAf8V)|xm zt%k+SKsUL19ijhTBB3iOIL}c-tIh9f6v(PYc*t@S zMtNJDhv{;)632WF4<9~nJ6N-=tsDnN0V$>#sg!V(e?uG|^qwz=7W9i|vourt25-ca z7YMG{9^$g7#s!TUV3WM@WiI{F zf9DyMtVs*C<(lKP4>Z7nlkTNhuLZu{ich6OPX)O|2R~@UQvVC1dma;*YCvDocIt204m^W)L ztp+4(-3c#D55CI0tV;q4b*)qGH{2E6Qrnz26W8W(B565ipbzRew?LJYMO`7N9 Date: Thu, 13 Oct 2022 16:45:25 +0800 Subject: [PATCH 137/204] add det-demo-tmi.md and docs --- .gitignore | 1 + det-demo-tmi/Dockerfile | 25 ++ det-demo-tmi/README.md | 269 ++++++++++++++++++++ det-demo-tmi/app/start.py | 223 ++++++++++++++++ det-demo-tmi/img-man/infer-template.yaml | 12 + det-demo-tmi/img-man/mining-template.yaml | 11 + det-demo-tmi/img-man/training-template.yaml | 13 + det-demo-tmi/requirements.txt | 4 + docs/det-demo-tmi.md | 1 + docs/ymir-executor-version.md | 4 +- 10 files changed, 561 insertions(+), 2 deletions(-) create mode 100644 det-demo-tmi/Dockerfile create mode 100644 det-demo-tmi/README.md create mode 100644 det-demo-tmi/app/start.py create mode 100644 det-demo-tmi/img-man/infer-template.yaml create mode 100644 det-demo-tmi/img-man/mining-template.yaml create mode 100644 det-demo-tmi/img-man/training-template.yaml create mode 100644 det-demo-tmi/requirements.txt create mode 100644 docs/det-demo-tmi.md diff --git a/.gitignore b/.gitignore index 5563689..2c245d8 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,4 @@ yolov4_training/yolov4.conv.137 yolov4_training/build_docker.sh yolov4_training/dockerfile_tmp yolov4_training/yolov4.conv.137 +det-demo-tmi/voc_dog diff --git a/det-demo-tmi/Dockerfile b/det-demo-tmi/Dockerfile new 
file mode 100644
index 0000000..9a742a9
--- /dev/null
+++ b/det-demo-tmi/Dockerfile
@@ -0,0 +1,25 @@
+# a docker file for a sample training / mining / infer executor
+
+FROM python:3.8.13-alpine
+
+# Add bash
+RUN apk add bash
+# Required to build numpy wheel
+RUN apk add g++
+
+COPY requirements.txt ./
+RUN pip3 install -r requirements.txt
+
+WORKDIR /app
+# copy user code to WORKDIR
+COPY ./app/start.py /app/
+
+# copy user config template to /img-man
+RUN mkdir -p /img-man
+COPY img-man/*-template.yaml /img-man/
+
+# entry point for your app
+# the whole docker image will be started with `nvidia-docker run `
+# and this command will run automatically
+RUN echo "python /app/start.py" > /usr/bin/start.sh
+CMD bash /usr/bin/start.sh
diff --git a/det-demo-tmi/README.md b/det-demo-tmi/README.md
new file mode 100644
index 0000000..4d08199
--- /dev/null
+++ b/det-demo-tmi/README.md
@@ -0,0 +1,269 @@
+# Guide to building a custom ymir image
+
+## Audience
+
+This document is intended for:
+
+* Algorithm and engineering staff who develop training, mining and inference images for ymir
+
+* Algorithm and engineering staff who want to connect existing training, mining and inference images to the ymir system
+
+It describes in detail how to develop a new image with the ymir executor framework.
+
+![](../docs/ymir-docker-develop.drawio.png)
+
+## Preparation
+
+1. Clone the ymir project and build your own demo image:
+
+```
+git clone https://github.com/modelai/ymir-executor-fork -b ymir-dev
+cd ymir-executor-fork/det-demo-tmi
+
+docker build -t ymir/executor:det-demo-tmi .
+```
+
+2. Download the voc dog dataset
+
+```
+sudo apt install wget unzip
+
+wget https://github.com/modelai/ymir-executor-fork/releases/download/dataset/voc_dog_debug_sample.zip -O voc_dog_debug_sample.zip
+
+unzip voc_dog_debug_sample.zip
+```
+Running the commands above yields the following directory layout:
+```
+voc_dog
+├── in   # input directory
+│   ├── annotations      # annotation files
+│   ├── assets           # image files
+│   ├── train-index.tsv  # training set index file
+│   └── val-index.tsv    # validation set index file
+└── out  # output directory
+```
+
+3. Configure `/in/env.yaml` and `/in/config.yaml`
+
+  * Example `voc_dog/in/env.yaml`
+    protocol_version: a field added after ymir1.3.0 that states the ymir interface version
+
+    ```
+    task_id: task0
+    protocol_version: 1.0.0
+    run_training: True
+    run_mining: False
+    run_infer: False
+    input:
+        root_dir: /in
+        assets_dir: /in/assets
+        annotations_dir: /in/annotations
+        models_dir: /in/models
+        training_index_file: /in/train-index.tsv
+        val_index_file: /in/val-index.tsv
+        candidate_index_file: /in/candidate-index.tsv
+        config_file: /in/config.yaml
+    output:
+        root_dir: /out
+        models_dir: /out/models
+        tensorboard_dir: /out/tensorboard
+        training_result_file: /out/models/result.yaml
+        mining_result_file: /out/result.tsv
+        infer_result_file: /out/infer-result.json
+        monitor_file: /out/monitor.txt
+        executor_log_file: /out/ymir-executor-out.log
+    ```
+
+  * Example `voc_dog/in/config.yaml`
+    ```
+    class_names:
+      - dog
+    export_format: ark:raw
+    gpu_count: 1
+    # gpu_id: '0,1,2,3'
+    gpu_id: '0'
+    pretrained_model_params: []
+    shm_size: 128G
+    task_id: t00000020000020167c11661328921
+
+    # just for test, remove this key in your own docker image
+    expected_map: 0.983 # expected map for training task
+    idle_seconds: 60 # idle seconds for each task
+    ```
+
+4. Run the image for a test
+```
+# interactive run
+docker run -it --rm -v $PWD/voc_dog/in:/in -v $PWD/voc_dog/out:/out ymir/executor:det-demo-tmi bash
+> bash /usr/bin/start.sh
+
+# direct run
+docker run --rm -v $PWD/voc_dog/in:/in -v $PWD/voc_dog/out:/out ymir/executor:det-demo-tmi
+```
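+
+Inside the container, the two YAML files above are read through the `ymir_exc` SDK. A minimal sketch of loading the merged configuration (illustrative only, not part of the demo code):
+
+```python
+# load /in/env.yaml and /in/config.yaml as one EasyDict config object
+from ymir_exc.util import get_merged_config
+
+cfg = get_merged_config()
+print(cfg.ymir.run_training)   # task flag, from /in/env.yaml
+print(cfg.param.class_names)   # hyper-parameter, from /in/config.yaml
+```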
+
+## How ymir invokes the image
+
+ymir starts the image through the mir train / mir mining / mir infer commands, following these steps:
+
+1. Export the image assets and annotation files that the image will need
+
+2. Prepare the image configuration files config.yaml and env.yaml
+
+3. Start the image via nvidia-docker run; when the image starts, the following directories and files are provided:
+
+| directory or file | description | permission |
+| --- | --- | --- |
+| `/in/env.yaml` | task type, task id, location of the dataset index files, etc. | read only |
+| `/in/config.yaml` | hyper-parameters and other settings used by the image itself | read only |
+| `/in/*-index.tsv` | dataset index files | read only |
+| `/in/models` | directory holding pretrained models | read only |
+| `/in/assets` | directory holding image assets | read only |
+| `/in/annotations` | directory holding annotation files | read only |
+| `/out/tensorboard` | directory for writing tensorboard logs | read / write |
+| `/out/models` | directory for saving result models | read / write |
+
+4. After startup, the image performs its training, mining or inference task and writes the results to the corresponding files; it returns 0 on success and a non-zero error code on failure
+
+5. ymir archives the normal or abnormal results, completing the whole process
+
+## Developing the parts shared by training, mining and inference images
+
+app/start.py shows a simple executor implementation; this document uses this sample project to explain how to develop an image with the framework.
+
+The following points about this file are worth noting:
+
+1. In the Dockerfile, the last command states that when this image is started by the ymir system via nvidia-docker run, it executes `python /app/start.py` by default, i.e. the `app/start.py` file of this project
+
+2. Everything related to the image framework lives in the `ymir_exc` package, which contains:
+
+  * `env`: environment, providing the task type, task id and similar information
+
+  * `dataset_reader`: dataset readers for obtaining dataset information
+
+  * `result_writer`: writes training, mining and inference results
+
+  * `monitor`: writes progress information
+
+  * `util`: common helper functions, e.g. `get_merged_config()`
+
+3. Calling `cfg = util.get_merged_config()` returns the default `EasyDict` instance; its `cfg.ymir` part comes from the file `/in/env.yaml`. If you want to change this default file for testing, you can modify `settings.DEFAULT_ENV_FILE_PATH` directly, but when packaging the actual image its value should point back to the default `/in/env.yaml`. `cfg.param` comes from `/in/config.yaml`
+
+4. In the `start()` method, `run_training` / `run_mining` / `run_infer` from `cfg.ymir` determine which task type to execute this time. If the image does not support the requested task type, it may simply raise an error
+
+5. Although `app/start.py` shows a single image that combines training, mining and inference, developers may also split them into several separate images, e.g. one for training and one combining mining and inference
+
+## Training process
+
+The function `_run_training` in `app/start.py` shows a sample training implementation; note the following:
+
+1. Getting the hyper-parameters
+
+  * Use `cfg.param` to obtain the hyper-parameters and other information passed in from outside
+
+  * Every training image should provide a hyper-parameter template `training-template.yaml`; the ymir system supplies the hyper-parameters based on this template
+
+  * The following keys are reserved and are set by the system:
+
+| key | type | description |
+| --- | --- | --- |
+| class_names | list | object classes |
+| gpu_id | str | usable gpu ids, separated by commas; an empty value means training on cpu |
+| pretrained_model_params | list | list of pretrained models; if given, training should continue from this model |
+
+2. Getting the training and validation sets: use `cfg.ymir.input.training_index_file` and `cfg.ymir.input.val_index_file` to obtain the index files of the training and validation sets. Each line of an index file holds the absolute image path and the absolute annotation path, separated by `\t`.
+```
+from ymir_exc.util import get_merged_config
+
+cfg = get_merged_config()
+with open(cfg.ymir.input.training_index_file, 'r') as fp:
+    lines = fp.readlines()
+
+for idx, line in enumerate(lines):
+    image_path, annotation_path = line.strip().split()
+    ...
+```
+
+3. Saving models
+
+  * Models are saved into a separate sub-directory per stage name
+
+  * `cfg.ymir.output.models_dir` provides the model output directory; you can use the built-in save methods of pytorch, mxnet, darknet or any other training framework to save the models into a sub-directory of this directory named after the current stage_name
+
+  * For example, to save the models of stage_name 'epoch-5000', save the model files into the directory `os.path.join(cfg.ymir.output.models_dir, 'epoch-5000')`
+
+  * Afterwards, `result_writer.write_model_stage()` can be used to save a summary of the training result, including the list of model file names (without directory) and the mAP (see the sketch after this list)
+
+  * Alternatively, `util.write_ymir_training_result()` saves the training result with better compatibility and fault tolerance
+
+  * The models to keep are recorded in `cfg.ymir.output.training_result_file`; ymir packages the model files according to this file for download, iterative training, inference and mining
+
+4. Recording progress: use `monitor.write_monitor_logger(percent)` to record the current progress of the task. In practice, every few iterations you can estimate the current progress (a number between 0 and 1) from the current and total iteration counts and call this method to record it
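+
+A minimal sketch of items 3 and 4 above; the stage name 'epoch-5000' and the file name 'best.pt' are placeholders:
+
+```python
+import os
+from ymir_exc import monitor
+from ymir_exc import result_writer as rw
+from ymir_exc.util import get_merged_config
+
+cfg = get_merged_config()
+stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch-5000')
+os.makedirs(stage_dir, exist_ok=True)
+# ... save the weight file(s) into stage_dir with your training framework ...
+rw.write_model_stage(stage_name='epoch-5000', files=['best.pt'], mAP=0.5)
+monitor.write_monitor_logger(percent=0.5)  # progress value in [0, 1]
+```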
+
+## Mining process
+
+The mining process means: given a base model and an unlabelled candidate dataset, run an active learning algorithm on this candidate dataset, obtain a score for every image and save the scores.
+
+The function `_run_mining` in `app/start.py` shows a sample mining implementation; note the following:
+
+1. Getting the parameters
+
+  * Use `cfg = get_merged_config()` to obtain the externally supplied parameters `cfg.param`
+
+  * Every mining image should provide a parameter template `mining-template.yaml`; the ymir system supplies the parameters based on this template
+
+  * The following keys are reserved and are set by the system:
+
+| key | type | description |
+| --- | --- | --- |
+| class_names | list | object classes |
+| gpu_id | str | usable gpu ids, separated by commas; an empty value means running on cpu |
+| model_params_path | list | list of model paths; the image should pick the model it can use from this list, and raise an error directly if several of them are usable |
+
+2. Getting the candidate set
+
+  * A mining task runs on an unlabelled candidate dataset; use `cfg.ymir.input.candidate_index_file` to obtain the index file of the mining dataset. Each line of this file is the absolute path of one image.
+
+  ```
+  with open(cfg.ymir.input.candidate_index_file, 'r') as fp:
+      lines = fp.readlines()
+
+  for line in lines:
+      image_path = line.strip()
+      ...
+  ```
+
+3. Saving the result
+
+  * Use `result_writer.write_mining_result()` to save the mining result; it is written to `cfg.ymir.output.mining_result_file`, from which ymir generates the new dataset.
+
+## Inference process
+
+The inference process means: given a base model and an unlabelled candidate dataset, run model inference on this candidate dataset, obtain the detection result (boxes, classes, scores) for every image and save this result.
+
+The function `_run_infer` in `app/start.py` shows a sample inference implementation; note the following:
+
+1. Getting the parameters: same as in the mining process
+
+2. Getting the candidate set: same as in the mining process, also through the file `cfg.ymir.input.candidate_index_file`
+
+3. Saving the result
+
+  * The inference result itself is a dict whose keys are the image paths of the candidate set and whose values are lists of `result_writer.Annotation` objects
+
+  * Use `result_writer.write_infer_result()` to save the inference result (see the sketch below); it is written to `cfg.ymir.output.infer_result_file`, from which ymir renders the results and generates new datasets.
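+
+A minimal sketch of assembling and saving such a result; the image path, class name and box values are placeholders:
+
+```python
+from ymir_exc import result_writer as rw
+
+ann = rw.Annotation(class_name='dog', score=0.9,
+                    box=rw.Box(x=10, y=20, w=50, h=60))
+infer_result = {'/in/assets/demo.jpg': [ann]}
+rw.write_infer_result(infer_result=infer_result)
+```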
+
+## Packaging the image
+
+You can build your own packaging script on top of the `Dockerfile`
+
+## Testing
+
+The image can be tested in the following ways:
+
+1. Test with `ymir-executor-verifier`
+
+2. Test through the ymir web system
+
+3. Test by launching the mir train / mir mining / mir infer commands from the ymir command line
+
+
diff --git a/det-demo-tmi/app/start.py b/det-demo-tmi/app/start.py
new file mode 100644
index 0000000..2b8e877
--- /dev/null
+++ b/det-demo-tmi/app/start.py
@@ -0,0 +1,223 @@
+import logging
+import os
+import random
+import sys
+import time
+from typing import List
+
+# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail
+os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python')
+from tensorboardX import SummaryWriter
+from easydict import EasyDict as edict
+from ymir_exc import monitor
+from ymir_exc import result_writer as rw
+from ymir_exc.util import get_merged_config
+
+
+def start() -> int:
+    cfg = get_merged_config()
+
+    if cfg.ymir.run_training:
+        _run_training(cfg)
+    if cfg.ymir.run_mining:
+        _run_mining(cfg)
+    if cfg.ymir.run_infer:
+        _run_infer(cfg)
+
+    return 0
+
+
+def _run_training(cfg: edict) -> None:
+    """
+    sample function of training, which shows:
+    1. how to get config file
+    2. how to read training and validation datasets
+    3. how to write logs
+    4. how to write training result
+    """
+    #! use `cfg.param` to get the hyper-parameters for training
+    gpu_id: str = cfg.param.get('gpu_id')
+    class_names: List[str] = cfg.param.get('class_names')
+    expected_mAP: float = cfg.param.get('expected_map', 0.6)
+    idle_seconds: float = cfg.param.get('idle_seconds', 60)
+    trigger_crash: bool = cfg.param.get('trigger_crash', False)
+    #! use `logging` or `print` to write log to console
+    # notice that logging.basicConfig is invoked at executor.env
+    logging.info(f'gpu device: {gpu_id}')
+    logging.info(f'dataset class names: {class_names}')
+    logging.info(f"training config: {cfg.param}")
+
+    #! count the image and annotation files
+    with open(cfg.ymir.input.training_index_file, 'r') as fp:
+        lines = fp.readlines()
+
+    valid_image_count = 0
+    valid_ann_count = 0
+
+    N = len(lines)
+    monitor_gap = max(1, N // 100)
+    for idx, line in enumerate(lines):
+        asset_path, annotation_path = line.strip().split()
+        if os.path.isfile(asset_path):
+            valid_image_count += 1
+
+        if os.path.isfile(annotation_path):
+            valid_ann_count += 1
+
+        #! use `monitor.write_monitor_logger` to write the task progress percent to monitor.txt
+        if idx % monitor_gap == 0:
+            monitor.write_monitor_logger(percent=0.2 * idx / N)
+
+    logging.info(f'total image-ann pair: {N}')
+    logging.info(f'valid images: {valid_image_count}')
+    logging.info(f'valid annotations: {valid_ann_count}')
+
+    #! use `monitor.write_monitor_logger` to write the task progress percent to monitor.txt
+    monitor.write_monitor_logger(percent=0.2)
+
+    # suppose we have a long time training, and have saved the final model
+    #! model output dir: os.path.join(cfg.ymir.output.models_dir, your_stage_name)
+    stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch10')
+    os.makedirs(stage_dir, exist_ok=True)
+    with open(os.path.join(stage_dir, 'epoch10.pt'), 'w') as f:
+        f.write('fake model weight')
+    with open(os.path.join(stage_dir, 'config.py'), 'w') as f:
+        f.write('fake model config file')
+    #! use `rw.write_model_stage` to save training result
+    rw.write_model_stage(stage_name='epoch10', files=['epoch10.pt', 'config.py'], mAP=random.random() / 2)
+
+    _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash)
+
+    write_tensorboard_log(cfg.ymir.output.tensorboard_dir)
+
+    stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch20')
+    os.makedirs(stage_dir, exist_ok=True)
+    with open(os.path.join(stage_dir, 'epoch20.pt'), 'w') as f:
+        f.write('fake model weight')
+    with open(os.path.join(stage_dir, 'config.py'), 'w') as f:
+        f.write('fake model config file')
+    rw.write_model_stage(stage_name='epoch20', files=['epoch20.pt', 'config.py'], mAP=expected_mAP)
+
+    #! if task done, write 100% percent log
+    logging.info('training done')
+    monitor.write_monitor_logger(percent=1.0)
+
+
+def _run_mining(cfg: edict) -> None:
+    #! use `cfg.param` to get the mining parameters
+    # pretrained models are in `cfg.ymir.input.models_dir`
+    gpu_id: str = cfg.param.get('gpu_id')
+    class_names: List[str] = cfg.param.get('class_names')
+    idle_seconds: float = cfg.param.get('idle_seconds', 60)
+    trigger_crash: bool = cfg.param.get('trigger_crash', False)
+    #! use `logging` or `print` to write log to console
+    logging.info(f"mining config: {cfg.param}")
+    logging.info(f'gpu device: {gpu_id}')
+    logging.info(f'dataset class names: {class_names}')
+
+    #! use `cfg.ymir.input.candidate_index_file` to read candidate dataset items
+    # note that annotations path will be empty str if there's no annotations in that dataset
+    #! count the image files
+    with open(cfg.ymir.input.candidate_index_file, 'r') as fp:
+        lines = fp.readlines()
+
+    valid_images = []
+    valid_image_count = 0
+    for line in lines:
+        if os.path.isfile(line.strip()):
+            valid_image_count += 1
+            valid_images.append(line.strip())
+
+    #! use `monitor.write_monitor_logger` to write the task progress to monitor.txt
+    logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}")
+    monitor.write_monitor_logger(percent=0.2)
+
+    _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash)
+
+    #! write mining result
+    # here we give a fake score to each asset
+    total_length = len(valid_images)
+    mining_result = [(asset_path, index / total_length) for index, asset_path in enumerate(valid_images)]
+    rw.write_mining_result(mining_result=mining_result)
+
+    #! if task done, write 100% percent log
+    logging.info('mining done')
+    monitor.write_monitor_logger(percent=1.0)
+
+
+def _run_infer(cfg: edict) -> None:
+    #! use `cfg.param` to get the infer parameters
+    # the usable model paths are passed in through `model_params_path`; the weight files live under `cfg.ymir.input.models_dir`
+    class_names = cfg.param.get('class_names')
+    idle_seconds: float = cfg.param.get('idle_seconds', 60)
+    trigger_crash: bool = cfg.param.get('trigger_crash', False)
+    seed: int = cfg.param.get('seed', 15)
+    #! use `logging` or `print` to write log to console
+    logging.info(f"infer config: {cfg.param}")
+
+    #! use `cfg.ymir.input.candidate_index_file` to read candidate dataset items
+    # note that annotations path will be empty str if there's no annotations in that dataset
+    with open(cfg.ymir.input.candidate_index_file, 'r') as fp:
+        lines = fp.readlines()
+
+    valid_images = []
+    invalid_images = []
+    valid_image_count = 0
+    for line in lines:
+        if os.path.isfile(line.strip()):
+            valid_image_count += 1
+            valid_images.append(line.strip())
+        else:
+            invalid_images.append(line.strip())
+
+    #! use `monitor.write_monitor_logger` to write log to console and the task progress percent to monitor.txt
+    logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}")
+    monitor.write_monitor_logger(percent=0.2)
+
+    _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash)
+
+    #! write infer result
+    fake_anns = []
+    random.seed(seed)
+    for class_name in class_names:
+        x = random.randint(0, 100)
+        y = random.randint(0, 100)
+        w = random.randint(50, 100)
+        h = random.randint(50, 100)
+        ann = rw.Annotation(class_name=class_name, score=random.random(), box=rw.Box(x=x, y=y, w=w, h=h))
+
+        fake_anns.append(ann)
+
+    infer_result = {asset_path: fake_anns for asset_path in valid_images}
+    for asset_path in invalid_images:
+        infer_result[asset_path] = []
+    rw.write_infer_result(infer_result=infer_result)
+
+    #! if task done, write 100% percent log
+    logging.info('infer done')
+    monitor.write_monitor_logger(percent=1.0)
+
+
+def _dummy_work(idle_seconds: float, trigger_crash: bool = False, gpu_memory_size: int = 0) -> None:
+    if idle_seconds > 0:
+        time.sleep(idle_seconds)
+    if trigger_crash:
+        raise RuntimeError('app crashed')
+
+
+def write_tensorboard_log(tensorboard_dir: str) -> None:
+    tb_log = SummaryWriter(tensorboard_dir)
+
+    total_epoch = 30
+    for e in range(total_epoch):
+        tb_log.add_scalar("fake_loss", 10 / (1 + e), e)
+        time.sleep(1)
+        monitor.write_monitor_logger(percent=e / total_epoch)
+
+
+if __name__ == '__main__':
+    logging.basicConfig(stream=sys.stdout,
+                        format='%(levelname)-8s: [%(asctime)s] %(message)s',
+                        datefmt='%Y%m%d-%H:%M:%S',
+                        level=logging.INFO)
+    sys.exit(start())
diff --git a/det-demo-tmi/img-man/infer-template.yaml b/det-demo-tmi/img-man/infer-template.yaml
new file mode 100644
index 0000000..b3d45dd
--- /dev/null
+++ b/det-demo-tmi/img-man/infer-template.yaml
@@ -0,0 +1,12 @@
+# infer template for your executor app
+# after building the image, it should be at /img-man/infer-template.yaml
+# the keys gpu_id, task_id, model_params_path, class_names should be preserved
+
+gpu_id: '0'
+task_id: 'default-infer-task'
+model_params_path: []
+class_names: []
+
+# just for test, remove this key in your own docker image
+idle_seconds: 3 # idle seconds for each task
+seed: 15
diff --git a/det-demo-tmi/img-man/mining-template.yaml b/det-demo-tmi/img-man/mining-template.yaml
new file mode 100644
index 0000000..5927eca
--- /dev/null
+++ b/det-demo-tmi/img-man/mining-template.yaml
@@ -0,0 +1,11 @@
+# mining template for your executor app
+# after building the image, it should be at /img-man/mining-template.yaml
+# the keys gpu_id, task_id, model_params_path, class_names should be preserved
+
+gpu_id: '0'
+task_id: 'default-mining-task'
+model_params_path: []
+class_names: []
+
+# just for test, remove this key in your own docker image
+idle_seconds: 6 # idle seconds for each task
diff --git a/det-demo-tmi/img-man/training-template.yaml b/det-demo-tmi/img-man/training-template.yaml
new file mode 100644
index 0000000..f114648
--- /dev/null
+++ b/det-demo-tmi/img-man/training-template.yaml
@@ -0,0 +1,13 @@
+# training template for your executor app
+# after building the image, it should be at /img-man/training-template.yaml
+# the keys gpu_id, task_id, pretrained_model_params, class_names should be preserved
+
+gpu_id: '0'
+task_id: 'default-training-task'
+pretrained_model_params: []
+class_names: []
+export_format: 'det-voc:raw'
+
+# just for test, remove this key in your own docker image
+expected_map: 0.983 # expected map for training task
+idle_seconds: 60 # idle seconds for each task
diff --git a/det-demo-tmi/requirements.txt b/det-demo-tmi/requirements.txt
new file mode 100644
index 0000000..0517cf4
--- /dev/null
+++ b/det-demo-tmi/requirements.txt
@@ -0,0 +1,4 @@
+pydantic>=1.8.2
+pyyaml>=5.4.1
+tensorboardX>=2.4
+-e "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0"
diff --git a/docs/det-demo-tmi.md b/docs/det-demo-tmi.md
new file mode 100644
index 0000000..b7469a6
--- /dev/null
+++ b/docs/det-demo-tmi.md
@@ -0,0 +1 @@
+# det-demo-tmi
diff --git a/docs/ymir-executor-version.md b/docs/ymir-executor-version.md
index 247ee13..1c1c30f 100644
--- a/docs/ymir-executor-version.md
+++ b/docs/ymir-executor-version.md
@@ -4,13 +4,13 @@
 
 - Training images must specify the dataset annotation format; the default format in ymir1.1.0 is `ark:raw`
 
-- Training images can obtain the system's ymir version, which helps images stay compatible
+- Training images can obtain the system's ymir interface version, which helps images stay compatible
 
 ## Helper libraries
 
 - [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) uses the ymir1.3.0 branch
 
-- [ymir-executor-verifier]() image verification tool
+- [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) image verification tool
 
 # ymir1.1.0
 

From 81a20197f158a63335dc8c5ddf90f17f7357002f Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 13 Oct 2022 16:53:07 +0800
Subject: [PATCH 138/204] update doc

---
 det-demo-tmi/README.md | 5 +++--
 docs/det-demo-tmi.md   | 1 -
 2 files changed, 3 insertions(+), 3 deletions(-)
 delete mode 100644 docs/det-demo-tmi.md

diff --git a/det-demo-tmi/README.md b/det-demo-tmi/README.md
index 4d08199..c299328 100644
--- a/det-demo-tmi/README.md
+++ b/det-demo-tmi/README.md
@@ -46,7 +46,8 @@ voc_dog
 3. Configure `/in/env.yaml` and `/in/config.yaml`
 
   * Example `voc_dog/in/env.yaml`
-    protocol_version: a field added after ymir1.3.0 that states the ymir interface version
+
+    * protocol_version: a field added after ymir1.3.0 that states the ymir interface version
 
     ```
     task_id: task0
@@ -260,7 +261,7 @@
 
 The image can be tested in the following ways:
 
-1. Test with `ymir-executor-verifier`
+1. Test with [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier)
 
 2. Test through the ymir web system
 
 3. Test by launching the mir train / mir mining / mir infer commands from the ymir command line
 
diff --git a/docs/det-demo-tmi.md b/docs/det-demo-tmi.md
deleted file mode 100644
index b7469a6..0000000
--- a/docs/det-demo-tmi.md
+++ /dev/null
@@ -1 +0,0 @@
-# det-demo-tmi

From 424cd4e8e0d9d5faa48ba82fbb2f4f86f62e1778 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Mon, 17 Oct 2022 15:13:30 +0800
Subject: [PATCH 139/204] add verifier to doc, add ymir_saved_file_patterns to
 training-template.yaml

---
 README.MD                                  |  6 +++++-
 README_zh-CN.MD                            |  4 ++++
 det-demo-tmi/README.md                     | 14 ++++++++++----
 det-mmdetection-tmi/training-template.yaml |  3 ++-
 det-yolov5-tmi/mining/ymir_infer.py        |  2 +-
 det-yolov5-tmi/start.py                    | 16 ++++++----------
 det-yolov5-tmi/train.py                    | 22 +++++++++++++++++-----
 det-yolov5-tmi/training-template.yaml      |  6 ++++--
 8 files changed, 49 insertions(+), 24 deletions(-)

diff --git a/README.MD b/README.MD
index e326aeb..823419c 100644
--- a/README.MD
+++ b/README.MD
@@ -85,9 +85,13 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 .
 
 ## how to custom ymir-executor
 
+- [demo ymir-executor](det-demo-tmi/README.md) from zero to one, build your own ymir-executor
+
 - [custom ymir-executor](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-dataset-zh-CN.md)
 
-- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk)
+- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir-executor development SDK.
+
+- [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) debug and check your ymir-executor
 
 ## how to import pretrained model weights
 
diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index 3ca0c44..e1d9960 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -101,10 +101,14 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile
 
 ## How to build your own ymir-executor
 
+- [demo ymir-executor](det-demo-tmi/README.md) from zero to one, build your own ymir-executor
+
 - [ymir-executor guide](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-dataset-zh-CN.md)
 
 - [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) helper library for ymir image development
 
+- [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) debug and check your ymir-executor
+
 ## How to import pretrained models
 
 - [How to import and finetune external models](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model)
diff --git a/det-demo-tmi/README.md b/det-demo-tmi/README.md
index c299328..715eb47 100644
--- a/det-demo-tmi/README.md
+++ b/det-demo-tmi/README.md
@@ -127,16 +127,18 @@
 
 5. ymir archives the normal or abnormal results, completing the whole process
 
-## Developing the parts shared by training, mining and inference images
+## The ymir_exc development toolkit for training, mining and inference images
 
-app/start.py shows a simple executor implementation; this document uses this sample project to explain how to develop an image with the framework.
+`app/start.py` shows a simple executor implementation; this document uses this sample project to explain how to develop an image with `ymir_exc`.
 
 The following points about this file are worth noting:
 
-1. In the Dockerfile, the last command states that when this image is started by the ymir system via nvidia-docker run, it executes `python /app/start.py` by default, i.e. the `app/start.py` file of this project
+1. In the Dockerfile, the last command states that when this image is started by the ymir system via nvidia-docker run, it executes `bash /usr/bin/start.sh` by default, which invokes `python /app/start.py`, i.e. the `app/start.py` file of this project
 
 2. Everything related to the image framework lives in the `ymir_exc` package, which contains:
 
+   Install it with `pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0"`; note that a version installed via `pip install ymir_exc` does not include the `ymir_exc.util` package.
+
   * `env`: environment, providing the task type, task id and similar information
 
@@ -151,7 +153,11 @@
 
 4. In the `start()` method, `run_training` / `run_mining` / `run_infer` from `cfg.ymir` determine which task type to execute this time. If the image does not support the requested task type, it may simply raise an error
 
-5. Although `app/start.py` shows a single image that combines training, mining and inference, developers may also split them into several separate images, e.g. one for training and one combining mining and inference
+5. Although `app/start.py` shows a single image that combines training, mining and inference, developers may also split them into several separate images, e.g. one for training and one combining mining and inference. In practice an image may run the inference and mining tasks together; note that the progress reporting then differs from running them separately.
+
+   * When run separately, the progress value `percent` of inference or mining lies in the interval [0, 1] and is recorded in `/out/monitor.txt` via `monitor.write_monitor_logger(percent)`.
+
+   * When run together, assuming the mining task runs first, the mining progress lies in [0, 0.5] and the inference progress lies in [0.5, 1].
 
 ## Training process
 
diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml
index 902f435..c3f3e7d 100644
--- a/det-mmdetection-tmi/training-template.yaml
+++ b/det-mmdetection-tmi/training-template.yaml
@@ -7,6 +7,7 @@ config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py'
 args_options: ''
 cfg_options: ''
 metric: 'bbox'
-val_interval: 1 # <0 means evaluation every interval
+val_interval: 1 # <0 means evaluation every interval
 max_keep_checkpoints: 1 # <0 means save all weight file, 1 means save last and best weight files, k means save topk best weight files and topk epoch/step weigth files
 port: 12345
+ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern
diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py
index 61d305f..6b94381 100644
--- a/det-yolov5-tmi/mining/ymir_infer.py
+++ b/det-yolov5-tmi/mining/ymir_infer.py
@@ -27,7 +27,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5):
 
     # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5.
-    gpu = int(ymir_yolov5.gpu_id.split(',')[LOCAL_RANK])
+    gpu = max(0, LOCAL_RANK)
     device = torch.device('cuda', gpu)
     ymir_yolov5.to(device)
 
diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py
index c250745..03ce300 100644
--- a/det-yolov5-tmi/start.py
+++ b/det-yolov5-tmi/start.py
@@ -8,8 +8,7 @@
 from ymir_exc import dataset_reader as dr
 from ymir_exc import env, monitor
 from ymir_exc import result_writer as rw
-from ymir_exc.util import (YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process,
-                           write_ymir_training_result)
+from ymir_exc.util import (YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process)
 
 from models.experimental import attempt_download
 from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file
@@ -58,7 +57,8 @@ def _run_training(cfg: edict) -> None:
     num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 8))
     model: str = cfg.param.model
     img_size: int = int(cfg.param.img_size)
-    save_period: int = max(1, min(epochs // 10, int(cfg.param.save_period)))
+    save_period: int = int(cfg.param.save_period)
+    save_best_only: bool = get_bool(cfg, key='save_best_only', default_value=True)
     args_options: str = cfg.param.args_options
     gpu_id: str = str(cfg.param.get('gpu_id', '0'))
     gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0
     batch_size: int = batch_size_per_gpu * max(1, gpu_count)
-    port: int = int(cfg.param.get('port', 29500))
+    port: int = find_free_port()
     sync_bn: bool = get_bool(cfg, key='sync_bn', default_value=False)
 
     weights = get_weight_file(cfg)
@@ -91,6 +91,9 @@
         '--workers', str(num_workers_per_gpu)
     ])
 
+    if save_best_only:
+        commands.append("--nosave")
+
     if gpu_count > 1 and sync_bn:
         commands.append("--sync-bn")
 
@@ -102,13 +105,6 @@
     subprocess.run(commands, check=True)
     monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0))
 
-    # 3.
convert to onnx and save model weight to design directory - opset = cfg.param.opset - command = f'python3 export.py --weights {models_dir}/best.pt --opset {opset} --include onnx' - logging.info(f'export onnx weight: {command}') - subprocess.run(command.split(), check=True) - - write_ymir_training_result(cfg, map50=0, files=[], id='last') # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 6b5e8ee..54fd2e8 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -21,6 +21,7 @@ from copy import deepcopy from datetime import datetime from pathlib import Path +import subprocess import numpy as np import torch @@ -402,7 +403,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model - if (not nosave) or (final_epoch and not evolve): # if save + if (not nosave) or (best_fitness == fi) or (final_epoch and not evolve): # if save ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'model': deepcopy(de_parallel(model)).half(), @@ -416,7 +417,8 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) - if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): + write_ymir_training_result(ymir_cfg, map50=best_fitness, id='best', files=[str(best)]) + if (not nosave) and (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): torch.save(ckpt, w / f'epoch{epoch}.pt') weight_file = str(w / f'epoch{epoch}.pt') write_ymir_training_result(ymir_cfg, map50=results[2], id=f'epoch_{epoch}', files=[weight_file]) @@ -465,10 +467,20 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear callbacks.run('on_train_end', last, best, plots, epoch, results) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + opset = ymir_cfg.param.opset + onnx_file: Path = best.with_suffix('.onnx') + command = f'python3 export.py --weights {best} --opset {opset} --include onnx' + LOGGER.info(f'export onnx weight: {command}') + subprocess.run(command.split(), check=True) + + if nosave: + # save best.pt and best.onnx + write_ymir_training_result(ymir_cfg, map50=best_fitness, id='best', files=[str(best), str(onnx_file)]) + else: + # set files = [] to save all files in /out/models + write_ymir_training_result(ymir_cfg, map50=best_fitness, id='best', files=[]) + torch.cuda.empty_cache() - # save the best and last weight file with other files in models_dir - if RANK in [-1, 0]: - write_ymir_training_result(ymir_cfg, map50=best_fitness, id=f'epoch_{epochs}', files=[]) return results diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index 4bd27b5..daaf476 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -12,9 +12,11 @@ export_format: 'ark:raw' model: 'yolov5s' batch_size_per_gpu: 16 num_workers_per_gpu: 8 -epochs: 300 +epochs: 100 img_size: 640 opset: 11 args_options: '--exist-ok' +save_best_only: True # save the best weight file only save_period: 10 -sync_bn: False # work for multi-gpu only +sync_bn: False # work for multi-gpu only +ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern From c0d64ba38a0c1eb7142b1b02adc8df27dc7dc4a1 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 18 Oct 2022 18:03:59 +0800 
Subject: [PATCH 140/204] add random and aldd mining algorithm --- det-mmdetection-tmi/docker/Dockerfile.cuda102 | 2 +- det-mmdetection-tmi/docker/Dockerfile.cuda111 | 2 +- det-mmdetection-tmi/mining-template.yaml | 2 + det-mmdetection-tmi/mining_base.py | 137 ++++++++++++++++++ det-mmdetection-tmi/mmdet/utils/util_ymir.py | 10 +- det-mmdetection-tmi/start.py | 8 +- det-mmdetection-tmi/training-template.yaml | 5 +- det-mmdetection-tmi/ymir_mining_aldd.py | 58 ++++++++ .../{ymir_mining.py => ymir_mining_cald.py} | 26 ++-- det-mmdetection-tmi/ymir_mining_random.py | 85 +++++++++++ det-yolov5-tmi/start.py | 9 +- 11 files changed, 317 insertions(+), 27 deletions(-) create mode 100644 det-mmdetection-tmi/mining_base.py create mode 100644 det-mmdetection-tmi/ymir_mining_aldd.py rename det-mmdetection-tmi/{ymir_mining.py => ymir_mining_cald.py} (98%) create mode 100644 det-mmdetection-tmi/ymir_mining_random.py diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda102 b/det-mmdetection-tmi/docker/Dockerfile.cuda102 index 6d07aa6..2fd8643 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda102 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda102 @@ -28,7 +28,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC \ # Install ymir-exc sdk and MMCV (no cu102/torch1.8.1, use torch1.8.0 instead) RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html \ - && pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ + && pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0" \ && conda clean --all # Install det-mmdetection-tmi diff --git a/det-mmdetection-tmi/docker/Dockerfile.cuda111 b/det-mmdetection-tmi/docker/Dockerfile.cuda111 index c811c85..2306105 100644 --- a/det-mmdetection-tmi/docker/Dockerfile.cuda111 +++ b/det-mmdetection-tmi/docker/Dockerfile.cuda111 @@ -26,7 +26,7 @@ RUN apt-get update && apt-get install -y build-essential ffmpeg libsm6 libxext6 # Install ymir-exc sdk and MMCV RUN pip install --no-cache-dir --upgrade pip wheel setuptools \ && pip install --no-cache-dir mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.8.0/index.html \ - && pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ + && pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0" \ && conda clean --all # Install det-mmdetection-tmi diff --git a/det-mmdetection-tmi/mining-template.yaml b/det-mmdetection-tmi/mining-template.yaml index 5649a3c..ed97f01 100644 --- a/det-mmdetection-tmi/mining-template.yaml +++ b/det-mmdetection-tmi/mining-template.yaml @@ -1,3 +1,5 @@ shm_size: '32G' export_format: 'ark:raw' cfg_options: '' +mining_algorithm: cald +class_distribution_scores: '' # 1.0,1.0,0.1,0.2 diff --git a/det-mmdetection-tmi/mining_base.py b/det-mmdetection-tmi/mining_base.py new file mode 100644 index 0000000..5922955 --- /dev/null +++ b/det-mmdetection-tmi/mining_base.py @@ -0,0 +1,137 @@ +import warnings +from typing import List + +import torch +import torch.nn.functional as F +from easydict import EasyDict as edict + + +def binary_classification_entropy(p: torch.Tensor) -> torch.Tensor: + """ + p: BCHW, the feature map after sigmoid, range in (0,1) + F.bce(x,y) = -(y * logx + (1-y) * log(1-x)) + """ + # return -(p * torch.log(p) + (1 - p) * torch.log(1 - p)) + return F.binary_cross_entropy(p, p, reduction='none') + + +def 
multiple_classification_entropy(p: torch.Tensor, activation: str) -> torch.Tensor:
+    """
+    p: BCHW
+
+    yolov5: sigmoid
+    nanodet: sigmoid
+    """
+    assert activation in ['sigmoid', 'softmax'], f'classification type = {activation}, not in sigmoid, softmax'
+
+    if activation == 'sigmoid':
+        entropy = F.binary_cross_entropy(p, p, reduction='none')
+        sum_entropy = torch.sum(entropy, dim=1, keepdim=True)
+        return sum_entropy
+    else:
+        # the original aldd code uses tf.log(p + 1e-12)
+        entropy = -(p) * torch.log(p + 1e-7)
+        sum_entropy = torch.sum(entropy, dim=1, keepdim=True)
+        return sum_entropy
+
+
+class FeatureMapBasedMining(object):
+
+    def __init__(self, ymir_cfg: edict):
+        self.ymir_cfg = ymir_cfg
+
+    def mining(self, feature_maps: List[torch.Tensor]) -> torch.Tensor:
+        raise Exception('not implemented')
+
+
+class ALDDMining(FeatureMapBasedMining):
+    """
+    Active Learning for Deep Detection Neural Networks (ICCV 2019)
+    official code: https://gitlab.com/haghdam/deep_active_learning
+
+    ported from the tensorflow code to pytorch; the following details differ:
+    1. average pooling: padded or not? symmetrical padding or not?
+    2. max pooling: ceil mode or not?
+    3. the resize shape for the aggregated feature map
+
+    these small changes cause a 20%-40% difference in P@N, N=100, for 1000 images in total.
+    P@5: 0.2
+    P@10: 0.3
+    P@20: 0.35
+    P@50: 0.5
+    P@100: 0.59
+    P@200: 0.73
+    P@500: 0.848
+    """
+
+    def __init__(self, ymir_cfg: edict, resize_shape: List[int]):
+        super().__init__(ymir_cfg)
+        self.resize_shape = resize_shape
+        self.max_pool_size = 32
+        self.avg_pool_size = 9
+        self.align_corners = False
+        self.num_classes = len(ymir_cfg.param.class_names)
+
+    def extract_conf(self, feature_maps: List[torch.Tensor], format='yolov5') -> List[torch.Tensor]:
+        """
+        extract the confidence feature map before sigmoid.
+        """
+        if format == 'yolov5':
+            # feature_maps: [bs, 3, height, width, xywh + conf + num_classes]
+            return [f[:, :, :, :, 4] for f in feature_maps]
+        else:
+            warnings.warn(f'unknown feature map format {format}')
+
+        return feature_maps
+
+    def mining(self, feature_maps: List[torch.Tensor]) -> torch.Tensor:
+        """
+        feature_maps: [BCHW]
+        1. resizing followed by sigmoid
+        2. get mining score
+        """
+        # fmap = [Batch size, anchor number = 3, height, width, 5 + class_number]
+
+        list_tmp = []
+        for fmap in feature_maps:
+            resized_fmap = F.interpolate(fmap, self.resize_shape, mode='bilinear', align_corners=self.align_corners)
+            list_tmp.append(resized_fmap)
+        conf = torch.cat(list_tmp, dim=1).sigmoid()
+        scores = self.get_mining_score(conf)
+        return scores
+
+    def get_mining_score(self, confidence_feature_map: torch.Tensor) -> torch.Tensor:
+        """
+        confidence_feature_map: BCHW, value in (0, 1)
+        1. A=sum(avg(entropy(fmap))) B,1,H,W
+        2. B=sum(entropy(avg(fmap))) B,1,H,W
+        3. C=max(B-A) B,1,h,w
+        4.
mean(C) B + """ + avg_entropy = F.avg_pool2d(self.get_entropy(confidence_feature_map), + kernel_size=self.avg_pool_size, + stride=1, + padding=0) + sum_avg_entropy = torch.sum(avg_entropy, dim=1, keepdim=True) + + entropy_avg = self.get_entropy( + F.avg_pool2d(confidence_feature_map, kernel_size=self.avg_pool_size, stride=1, padding=0)) + sum_entropy_avg = torch.sum(entropy_avg, dim=1, keepdim=True) + + uncertainty = sum_entropy_avg - sum_avg_entropy + + max_uncertainty = F.max_pool2d(uncertainty, + kernel_size=self.max_pool_size, + stride=self.max_pool_size, + padding=0, + ceil_mode=False) + + return torch.mean(max_uncertainty, dim=(1, 2, 3)) + + def get_entropy(self, feature_map: torch.Tensor) -> torch.Tensor: + if self.num_classes == 1: + # binary cross entropy + return binary_classification_entropy(feature_map) + else: + # multi-class cross entropy + return multiple_classification_entropy(feature_map, activation='sigmoid') diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 515c22a..24ef6e9 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -236,7 +236,7 @@ def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[f raise Exception(f'please set valid environment variable YMIR_MODELS_DIR, invalid directory {WORK_DIR}') # assert only one model config file in work_dir - result_files = [osp.basename(f) for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] + result_files = [f for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] if last: # save all output file @@ -245,8 +245,11 @@ def _write_latest_ymir_training_result(last: bool = False, key_score: Optional[f if max_keep_checkpoints > 0: topk_checkpoints = get_topk_checkpoints(result_files, max_keep_checkpoints) result_files = [f for f in result_files if not f.endswith(('.pth', '.pt'))] + topk_checkpoints + + result_files = [osp.basename(f) for f in result_files] rw.write_model_stage(files=result_files, mAP=float(map), stage_name='last') else: + result_files = [osp.basename(f) for f in result_files] # save newest weight file in format epoch_xxx.pth or iter_xxx.pth weight_files = [ osp.join(WORK_DIR, f) for f in result_files if f.startswith(('iter_', 'epoch_')) and f.endswith('.pth') @@ -285,13 +288,16 @@ def _write_ancient_ymir_training_result(key_score: Optional[float] = None): WORK_DIR = ymir_cfg.ymir.output.models_dir # assert only one model config file in work_dir - result_files = [osp.basename(f) for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] + result_files = [f for f in glob.glob(osp.join(WORK_DIR, '*')) if osp.basename(f) != 'result.yaml'] max_keep_checkpoints = int(ymir_cfg.param.get('max_keep_checkpoints', 1)) if max_keep_checkpoints > 0: topk_checkpoints = get_topk_checkpoints(result_files, max_keep_checkpoints) result_files = [f for f in result_files if not f.endswith(('.pth', '.pt'))] + topk_checkpoints + # convert to basename + result_files = [osp.basename(f) for f in result_files] + training_result_file = osp.join(WORK_DIR, 'result.yaml') if osp.exists(training_result_file): with open(training_result_file, 'r') as f: diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index b570b2d..220d373 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -37,11 +37,15 @@ def _run_training() -> None: def _run_mining(cfg: edict) -> None: gpu_id: str = 
str(cfg.param.get('gpu_id', '0'))
     gpu_count = len(gpu_id.split(','))
+    mining_algorithm: str = cfg.param.get('mining_algorithm', 'aldd')
+
+    supported_mining_algorithm = ['cald', 'aldd', 'random']
+    assert mining_algorithm in supported_mining_algorithm, f'unknown mining_algorithm {mining_algorithm}, not in {supported_mining_algorithm}'
     if gpu_count <= 1:
-        command = 'python3 ymir_mining.py'
+        command = f'python3 ymir_mining_{mining_algorithm}.py'
     else:
         port = find_free_port()
-        command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} ymir_mining.py'  # noqa
+        command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} ymir_mining_{mining_algorithm}.py'  # noqa
     logging.info(f'start mining: {command}')
     subprocess.run(command.split(), check=True)
diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml
index c3f3e7d..f04e51a 100644
--- a/det-mmdetection-tmi/training-template.yaml
+++ b/det-mmdetection-tmi/training-template.yaml
@@ -1,13 +1,12 @@
 shm_size: '32G'
 export_format: 'ark:raw'
-samples_per_gpu: 16
+samples_per_gpu: 16 # batch size per gpu
 workers_per_gpu: 8
-max_epochs: 300
+max_epochs: 100
 config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py'
 args_options: ''
 cfg_options: ''
 metric: 'bbox'
 val_interval: 1 # <0 means evaluation every interval
 max_keep_checkpoints: 1 # <0 means save all weight file, 1 means save last and best weight files, k means save topk best weight files and topk epoch/step weigth files
-port: 12345
 ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern
diff --git a/det-mmdetection-tmi/ymir_mining_aldd.py b/det-mmdetection-tmi/ymir_mining_aldd.py
new file mode 100644
index 0000000..4115d09
--- /dev/null
+++ b/det-mmdetection-tmi/ymir_mining_aldd.py
@@ -0,0 +1,58 @@
+import sys
+
+from easydict import EasyDict as edict
+from mmcv.parallel import collate, scatter
+from mmdet.datasets import replace_ImageToTensor
+from mmdet.datasets.pipelines import Compose
+from mmdet.models.detectors import SingleStageDetector, TwoStageDetector
+from ymir_exc.util import get_merged_config
+
+from .mining_base import ALDDMining
+from .ymir_infer import YmirModel
+from .ymir_mining_random import RandomMiner
+
+
+class ALDDMiner(RandomMiner):
+
+    def __init__(self, cfg: edict):
+        super().__init__(cfg)
+        self.ymir_model = YmirModel(cfg)
+        mmdet_cfg = self.ymir_model.cfg
+        mmdet_cfg.data.test.pipeline = replace_ImageToTensor(mmdet_cfg.data.test.pipeline)
+        self.test_pipeline = Compose(cfg.data.test.pipeline)
+        self.aldd_miner = ALDDMining(cfg, [640, 640])
+
+    def compute_score(self, asset_path: str) -> float:
+        dict_data = dict(img_info=dict(filename=asset_path), img_prefix=None)
+        pipeline_data = self.test_pipeline(dict_data)
+        data = collate([pipeline_data], samples_per_gpu=1)
+        # just get the actual data from DataContainer
+        data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
+        data['img'] = [img.data[0] for img in data['img']]
+        # scatter to specified GPU
+        data = scatter(data, [self.device])[0]
+
+        if isinstance(self.ymir_model.model, SingleStageDetector):
+            cls_score, bbox_pred = self.ymir_model.model.forward_dummy(data['img'])
+            mining_score = self.aldd_miner.mining(bbox_pred)
+
+            return float(mining_score)
+        elif isinstance(self.ymir_model.model, TwoStageDetector):
+            # (rpn_outs, roi_outs)
+            # outs = self.ymir_model.model.forward_dummy(img)
+            raise NotImplementedError('aldd mining is currently not supported for TwoStageDetector {}'.format(
+                self.ymir_model.model.__class__.__name__))
+        else:
+            raise NotImplementedError('aldd mining is currently not supported with {}'.format(
+                self.ymir_model.model.__class__.__name__))
+
+
+def main():
+    cfg = get_merged_config()
+    miner = ALDDMiner(cfg)
+    miner.mining()
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/det-mmdetection-tmi/ymir_mining.py b/det-mmdetection-tmi/ymir_mining_cald.py
similarity index 98%
rename from det-mmdetection-tmi/ymir_mining.py
rename to det-mmdetection-tmi/ymir_mining_cald.py
index 506506d..fe437ff 100644
--- a/det-mmdetection-tmi/ymir_mining.py
+++ b/det-mmdetection-tmi/ymir_mining_cald.py
@@ -251,6 +251,7 @@ def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]:
 
 
 class YmirMining(YmirModel):
+
     def __init__(self, cfg: edict):
         super().__init__(cfg)
         if cfg.ymir.run_mining and cfg.ymir.run_infer:
@@ -267,6 +268,8 @@ def __init__(self, cfg: edict):
     def mining(self):
         with open(self.cfg.ymir.input.candidate_index_file, 'r') as f:
             images = [line.strip() for line in f.readlines()]
+
+        max_barrier_times = len(images) // WORLD_SIZE
         if RANK == -1:
             N = len(images)
             tbar = tqdm(images)
@@ -282,9 +285,15 @@
         idx = -1
         beta = 1.3
         mining_result = []
-        for asset_path in tbar:
+        for idx, asset_path in enumerate(tbar):
+            if idx % monitor_gap == 0:
+                percent = get_ymir_process(stage=YmirStage.TASK,
+                                           p=idx / N,
+                                           task_idx=self.task_idx,
+                                           task_num=self.task_num)
+                monitor.write_monitor_logger(percent=percent)
             # batch-level sync, avoid 30min time-out error
-            if LOCAL_RANK != -1:
+            if WORLD_SIZE > 1 and idx < max_barrier_times:
                 dist.barrier()
 
             img = cv2.imread(asset_path)
@@ -332,16 +341,8 @@
             consistency /= len(aug_results_dict)
             mining_result.append((asset_path, consistency))
-            idx += 1
-
-            if idx % monitor_gap == 0:
-                percent = get_ymir_process(stage=YmirStage.TASK,
-                                           p=idx / N,
-                                           task_idx=self.task_idx,
-                                           task_num=self.task_num)
-                monitor.write_monitor_logger(percent=percent)
 
-        if RANK != -1:
+        if WORLD_SIZE > 1:
             mining_result = collect_results_gpu(mining_result, len(images))
         return mining_result
@@ -393,8 +394,7 @@
 def main():
     cfg = get_merged_config()
     miner = YmirMining(cfg)
-    gpu_id: str = str(cfg.param.get('gpu_id', '0'))
-    gpu = int(gpu_id.split(',')[LOCAL_RANK])
+    gpu = max(0, LOCAL_RANK)
     device = torch.device('cuda', gpu)
     miner.model.to(device)
     mining_result = miner.mining()
diff --git a/det-mmdetection-tmi/ymir_mining_random.py b/det-mmdetection-tmi/ymir_mining_random.py
new file mode 100644
index 0000000..097c000
--- /dev/null
+++ b/det-mmdetection-tmi/ymir_mining_random.py
@@ -0,0 +1,85 @@
+import os
+import random
+import sys
+
+import torch.distributed as dist
+from easydict import EasyDict as edict
+from mmcv.runner import init_dist
+from mmdet.apis.test import collect_results_gpu
+from tqdm import tqdm
+from ymir_exc import result_writer as rw
+from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+
+
+class RandomMiner(object):
+
+    def __init__(self, cfg: edict):
+        if LOCAL_RANK != -1:
+            init_dist(launcher='pytorch', backend="nccl" if dist.is_nccl_available() else "gloo")
+
+        self.cfg = cfg
+        gpu = max(0, LOCAL_RANK)
+        self.device = f'cuda:{gpu}'
+
+    def mining(self):
+        with
open(self.cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = len(images) // WORLD_SIZE + if RANK == -1: + N = len(images) + tbar = tqdm(images) + else: + images_rank = images[RANK::WORLD_SIZE] + N = len(images_rank) + if RANK == 0: + tbar = tqdm(images_rank) + else: + tbar = images_rank + + monitor_gap = max(1, N // 100) + + mining_result = [] + for idx, asset_path in enumerate(tbar): + if idx % monitor_gap == 0: + write_ymir_monitor_process(cfg=self.cfg, + task='mining', + naive_stage_percent=idx / N, + stage=YmirStage.TASK, + task_order='tmi') + + if WORLD_SIZE > 1 and idx < max_barrier_times: + dist.barrier() + + consistency = self.compute_score(asset_path=asset_path) + mining_result.append((asset_path, consistency)) + + if WORLD_SIZE > 1: + mining_result = collect_results_gpu(mining_result, len(images)) + + if RANK in [0, -1]: + rw.write_mining_result(mining_result=mining_result) + write_ymir_monitor_process(cfg=self.cfg, + task='mining', + naive_stage_percent=1, + stage=YmirStage.POSTPROCESS, + task_order='tmi') + return mining_result + + def compute_score(self, asset_path: str) -> float: + return random.random() + + +def main(): + cfg = get_merged_config() + miner = RandomMiner(cfg) + miner.mining() + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 03ce300..3c8f483 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -5,13 +5,12 @@ import cv2 from easydict import EasyDict as edict +from models.experimental import attempt_download +from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw -from ymir_exc.util import (YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process) - -from models.experimental import attempt_download -from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file +from ymir_exc.util import YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process def start(cfg: edict) -> int: @@ -63,7 +62,7 @@ def _run_training(cfg: edict) -> None: gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 batch_size: int = batch_size_per_gpu * max(1, gpu_count) - port: int = int(cfg.param.get('port', 29500)) + port: int = find_free_port() sync_bn: bool = get_bool(cfg, key='sync_bn', default_value=False) weights = get_weight_file(cfg) From 3230af1e72088444098ca06b48285a49418bfc6c Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 19 Oct 2022 12:09:19 +0800 Subject: [PATCH 141/204] fix training class_number bug --- det-mmdetection-tmi/README.md | 6 ++- det-mmdetection-tmi/mining_base.py | 2 +- .../mmdet/core/evaluation/eval_hooks.py | 16 +++--- det-mmdetection-tmi/mmdet/utils/util_ymir.py | 22 ++++---- det-mmdetection-tmi/ymir_mining_aldd.py | 54 ++++++++++++------- det-mmdetection-tmi/ymir_mining_random.py | 4 +- 6 files changed, 63 insertions(+), 41 deletions(-) diff --git a/det-mmdetection-tmi/README.md b/det-mmdetection-tmi/README.md index 5c1934d..8795930 100644 --- a/det-mmdetection-tmi/README.md +++ b/det-mmdetection-tmi/README.md @@ -7,11 +7,12 @@ # build docker image ``` -docker build -t ymir-executor/mmdet:cuda102-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f docker/Dockerfile.cuda102 . 
+docker build -t ymir-executor/mmdet:cuda102-tmi --build-arg YMIR=1.1.0 -f docker/Dockerfile.cuda102 . -docker build -t ymir-executor/mmdet:cuda111-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f docker/Dockerfile.cuda111 . +docker build -t ymir-executor/mmdet:cuda111-tmi --build-arg YMIR=1.1.0 -f docker/Dockerfile.cuda111 . ``` + # changelog - modify `mmdet/datasets/coco.py`, save the evaluation result to `os.environ.get('COCO_EVAL_TMP_FILE')` with json format - modify `mmdet/core/evaluation/eval_hooks.py`, write training result file and monitor task process @@ -29,3 +30,4 @@ docker build -t ymir-executor/mmdet:cuda111-tmi --build-arg SERVER_MODE=dev --bu --- - 2022/09/06: set `find_unused_parameters = True`, fix DDP bug +- 2022/10/18: add `random` and `aldd` mining algorithms. `aldd` supports YOLOX only. diff --git a/det-mmdetection-tmi/mining_base.py b/det-mmdetection-tmi/mining_base.py index 5922955..27ba2f9 100644 --- a/det-mmdetection-tmi/mining_base.py +++ b/det-mmdetection-tmi/mining_base.py @@ -85,7 +85,7 @@ def extract_conf(self, feature_maps: List[torch.Tensor], format='yolov5') -> Lis return feature_maps def mining(self, feature_maps: List[torch.Tensor]) -> torch.Tensor: - """ + """mining for feature maps feature_maps: [BCHW] 1. resizing followed by sigmoid 2. get mining score diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index b2e7dff..81a36bb 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -49,10 +49,10 @@ def before_train_epoch(self, runner): def after_train_epoch(self, runner): """Report the training process for ymir""" if self.by_epoch: - monitor_interval = max(1, runner.max_epochs//1000) + monitor_interval = max(1, runner.max_epochs // 1000) if runner.epoch % monitor_interval == 0: percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) + stage=YmirStage.TASK, p=runner.epoch / runner.max_epochs) monitor.write_monitor_logger(percent=percent) super().after_train_epoch(runner) @@ -62,10 +62,10 @@ def before_train_iter(self, runner): def after_train_iter(self, runner): if not self.by_epoch: - monitor_interval = max(1, runner.max_iters//1000) + monitor_interval = max(1, runner.max_iters // 1000) if runner.iter % monitor_interval == 0: percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.iter/runner.max_iters) + stage=YmirStage.TASK, p=runner.iter / runner.max_iters) monitor.write_monitor_logger(percent=percent) super().after_train_iter(runner) @@ -119,10 +119,10 @@ def before_train_epoch(self, runner): def after_train_epoch(self, runner): """Report the training process for ymir""" if self.by_epoch and runner.rank == 0: - monitor_interval = max(1, runner.max_epochs//1000) + monitor_interval = max(1, runner.max_epochs // 1000) if runner.epoch % monitor_interval == 0: percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.epoch/runner.max_epochs) + stage=YmirStage.TASK, p=runner.epoch / runner.max_epochs) monitor.write_monitor_logger(percent=percent) super().after_train_epoch(runner) @@ -132,10 +132,10 @@ def before_train_iter(self, runner): def after_train_iter(self, runner): if not self.by_epoch and runner.rank == 0: - monitor_interval = max(1, runner.max_iters//1000) + monitor_interval = max(1, runner.max_iters // 1000) if runner.iter % monitor_interval == 0: percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.iter/runner.max_iters) +
stage=YmirStage.TASK, p=runner.iter / runner.max_iters) monitor.write_monitor_logger(percent=percent) super().after_train_iter(runner) diff --git a/det-mmdetection-tmi/mmdet/utils/util_ymir.py b/det-mmdetection-tmi/mmdet/utils/util_ymir.py index 24ef6e9..6cb9ae2 100644 --- a/det-mmdetection-tmi/mmdet/utils/util_ymir.py +++ b/det-mmdetection-tmi/mmdet/utils/util_ymir.py @@ -5,12 +5,12 @@ import logging import os import os.path as osp -from typing import Any, Iterable, List, Optional +from typing import Any, Iterable, List, Optional, Union import mmcv import yaml from easydict import EasyDict as edict -from mmcv import Config +from mmcv import Config, ConfigDict from nptyping import NDArray, Shape, UInt8 from packaging.version import Version from ymir_exc import result_writer as rw @@ -27,7 +27,8 @@ def modify_mmcv_config(mmcv_cfg: Config, ymir_cfg: edict) -> None: - modify model output channel - modify epochs, checkpoint, tensorboard config """ - def recursive_modify_attribute(mmcv_cfg: Config, attribute_key: str, attribute_value: Any): + + def recursive_modify_attribute(mmcv_cfgdict: Union[Config, ConfigDict], attribute_key: str, attribute_value: Any): """ recursive modify mmcv_cfg: 1. mmcv_cfg.attribute_key to attribute_value @@ -35,14 +36,15 @@ def recursive_modify_attribute(mmcv_cfg: Config, attribute_key: str, attribute_v 3. mmcv_cfg.xxx[i].attribute_key to attribute_value (i=0, 1, 2 ...) 4. mmcv_cfg.xxx[i].xxx.xxx[j].attribute_key to attribute_value """ - for key in mmcv_cfg: + for key in mmcv_cfgdict: if key == attribute_key: - mmcv_cfg[key] = attribute_value - elif isinstance(mmcv_cfg[key], Config): - recursive_modify_attribute(mmcv_cfg[key], attribute_key, attribute_value) - elif isinstance(mmcv_cfg[key], Iterable): - for cfg in mmcv_cfg[key]: - if isinstance(cfg, Config): + mmcv_cfgdict[key] = attribute_value + logging.info(f'modify {mmcv_cfgdict}, {key} = {attribute_value}') + elif isinstance(mmcv_cfgdict[key], (Config, ConfigDict)): + recursive_modify_attribute(mmcv_cfgdict[key], attribute_key, attribute_value) + elif isinstance(mmcv_cfgdict[key], Iterable): + for cfg in mmcv_cfgdict[key]: + if isinstance(cfg, (Config, ConfigDict)): recursive_modify_attribute(cfg, attribute_key, attribute_value) # modify dataset config diff --git a/det-mmdetection-tmi/ymir_mining_aldd.py b/det-mmdetection-tmi/ymir_mining_aldd.py index 4115d09..51b5c13 100644 --- a/det-mmdetection-tmi/ymir_mining_aldd.py +++ b/det-mmdetection-tmi/ymir_mining_aldd.py @@ -1,15 +1,15 @@ import sys +import torch from easydict import EasyDict as edict +from mining_base import ALDDMining from mmcv.parallel import collate, scatter from mmdet.datasets import replace_ImageToTensor from mmdet.datasets.pipelines import Compose -from mmdet.models.detectors import SingleStageDetector, TwoStageDetector +from mmdet.models.detectors import YOLOX from ymir_exc.util import get_merged_config - -from .mining_base import ALDDMining -from .ymir_infer import YmirModel -from .ymir_mining_random import RandomMiner +from ymir_infer import YmirModel +from ymir_mining_random import RandomMiner class ALDDMiner(RandomMiner): @@ -17,12 +17,12 @@ class ALDDMiner(RandomMiner): def __init__(self, cfg: edict): super().__init__(cfg) self.ymir_model = YmirModel(cfg) - mmdet_cfg = self.ymir_model.cfg + mmdet_cfg = self.ymir_model.model.cfg mmdet_cfg.data.test.pipeline = replace_ImageToTensor(mmdet_cfg.data.test.pipeline) - self.test_pipeline = Compose(cfg.data.test.pipeline) + self.test_pipeline = Compose(mmdet_cfg.data.test.pipeline) self.aldd_miner 
= ALDDMining(cfg, [640, 640]) - def compute_score(self, asset_path: str) -> int: + def compute_score(self, asset_path: str) -> float: dict_data = dict(img_info=dict(filename=asset_path), img_prefix=None) pipeline_data = self.test_pipeline(dict_data) data = collate([pipeline_data], samples_per_gpu=1) @@ -32,19 +32,35 @@ def compute_score(self, asset_path: str) -> int: # scatter to specified GPU data = scatter(data, [self.device])[0] - if isinstance(self.ymir_model.model, SingleStageDetector): - cls_score, bbox_pred = self.ymir_model.model.forward_dummy(data['img']) - mining_score = self.aldd_miner(bbox_pred) + if isinstance(self.ymir_model.model, YOLOX): + # results = (cls_maps, reg_maps, iou_maps) + # cls_maps: [BxCx52x52, BxCx26x26, BxCx13x13] + # reg_maps: [Bx4x52x52, Bx4x26x26, Bx4x13x13] + # iou_maps: [Bx1x52x52, Bx1x26x26, Bx1x13x13] + results = self.ymir_model.model.forward_dummy(data['img'][0]) + feature_maps = [] + for cls, reg, iou in zip(results[0], results[1], results[2]): + maps = [reg, iou, cls] + feature_maps.append(torch.cat(maps, dim=1)) + mining_score = self.aldd_miner.mining(feature_maps) - return mining_score - elif isinstance(self.ymir_model.model, TwoStageDetector): - # (rpn_outs, roi_outs) - # outs = self.ymir_model.model.forward_dummy(img) - raise NotImplementedError('aldd mining is not supported for TwoStageDetector {}'.format( - self.ymir_model.model.__class__.__name__)) + return mining_score.item() else: - raise NotImplementedError('aldd mining is not supported with {}'.format( - self.ymir_model.model.__class__.__name__)) + raise NotImplementedError( + 'aldd mining is not supported with {}, only YOLOX is supported'.format( + self.ymir_model.model.__class__.__name__)) + + # TODO support other SingleStageDetector + # if isinstance(self.ymir_model.model, SingleStageDetector): + # pass + # elif isinstance(self.ymir_model.model, TwoStageDetector): + # # (rpn_outs, roi_outs) + # # outs = self.ymir_model.model.forward_dummy(img) + # raise NotImplementedError('aldd mining is not supported for TwoStageDetector {}'.format( + # self.ymir_model.model.__class__.__name__)) + # else: + # raise NotImplementedError('aldd mining is not supported with {}'.format( + # self.ymir_model.model.__class__.__name__)) def main(): diff --git a/det-mmdetection-tmi/ymir_mining_random.py b/det-mmdetection-tmi/ymir_mining_random.py index 097c000..0bb5afb 100644 --- a/det-mmdetection-tmi/ymir_mining_random.py +++ b/det-mmdetection-tmi/ymir_mining_random.py @@ -2,6 +2,7 @@ import random import sys +import torch import torch.distributed as dist from easydict import EasyDict as edict from mmcv.runner import init_dist @@ -55,7 +56,8 @@ def mining(self): if WORLD_SIZE > 1 and idx < max_barrier_times: dist.barrier() - consistency = self.compute_score(asset_path=asset_path) + with torch.no_grad(): + consistency = self.compute_score(asset_path=asset_path) mining_result.append((asset_path, consistency)) if WORLD_SIZE > 1: From cfd2d3b61ec37d25e38ef07543246a0650f2f3de Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 19 Oct 2022 15:09:29 +0800 Subject: [PATCH 142/204] change hyper-parameter --- det-mmdetection-tmi/README.md | 1 + det-mmdetection-tmi/infer-template.yaml | 2 +- det-mmdetection-tmi/mining-template.yaml | 2 +- det-mmdetection-tmi/tools/train.py | 1 - det-mmdetection-tmi/training-template.yaml | 4 ++-- det-yolov4-tmi/mining/infer-template.yaml | 2 +- det-yolov4-tmi/mining/mining-template.yaml | 2 +-
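For reference, the per-level fusion in `compute_score` above concatenates the regression, objectness and class maps along the channel axis before handing them to `ALDDMining.mining`. A toy shape check (batch size, class count and grid sizes are illustrative, taken from the comment in the diff):

```python
import torch

B, C = 1, 3  # batch size and class count, illustrative only
cls_maps = [torch.rand(B, C, s, s) for s in (52, 26, 13)]
reg_maps = [torch.rand(B, 4, s, s) for s in (52, 26, 13)]
iou_maps = [torch.rand(B, 1, s, s) for s in (52, 26, 13)]

feature_maps = [torch.cat([reg, iou, cls], dim=1)
                for cls, reg, iou in zip(cls_maps, reg_maps, iou_maps)]
# every level ends up with 4 + 1 + C channels
assert [fm.shape[1] for fm in feature_maps] == [4 + 1 + C] * 3
```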
det-yolov4-tmi/training-template.yaml | 2 +- det-yolov5-tmi/mining-template.yaml | 1 + det-yolov5-tmi/mining/ymir_infer.py | 11 +++-------- det-yolov5-tmi/mining/ymir_mining_aldd.py | 10 ++-------- det-yolov5-tmi/mining/ymir_mining_cald.py | 11 +++-------- det-yolov5-tmi/start.py | 2 +- det-yolov5-tmi/training-template.yaml | 4 ++-- det-yolov5-tmi/utils/ymir_yolov5.py | 12 ++++-------- live-code-executor/img-man/training-template.yaml | 2 +- 16 files changed, 25 insertions(+), 44 deletions(-) diff --git a/det-mmdetection-tmi/README.md b/det-mmdetection-tmi/README.md index 8795930..f1c0ab6 100644 --- a/det-mmdetection-tmi/README.md +++ b/det-mmdetection-tmi/README.md @@ -31,3 +31,4 @@ - 2022/09/06: set `find_unused_parameters = True`, fix DDP bug - 2022/10/18: add `random` and `aldd` mining algorithms. `aldd` supports YOLOX only. +- 2022/10/19: fix training class_number bug in `recursive_modify_attribute()` diff --git a/det-mmdetection-tmi/infer-template.yaml b/det-mmdetection-tmi/infer-template.yaml index bf61d79..de78f9c 100644 --- a/det-mmdetection-tmi/infer-template.yaml +++ b/det-mmdetection-tmi/infer-template.yaml @@ -1,4 +1,4 @@ -shm_size: '32G' +shm_size: '128G' export_format: 'ark:raw' cfg_options: '' conf_threshold: 0.2 diff --git a/det-mmdetection-tmi/mining-template.yaml b/det-mmdetection-tmi/mining-template.yaml index ed97f01..693463b 100644 --- a/det-mmdetection-tmi/mining-template.yaml +++ b/det-mmdetection-tmi/mining-template.yaml @@ -1,4 +1,4 @@ -shm_size: '32G' +shm_size: '128G' export_format: 'ark:raw' cfg_options: '' mining_algorithm: cald diff --git a/det-mmdetection-tmi/tools/train.py b/det-mmdetection-tmi/tools/train.py index df4f184..78fbe46 100644 --- a/det-mmdetection-tmi/tools/train.py +++ b/det-mmdetection-tmi/tools/train.py @@ -99,7 +99,6 @@ def main(): args = parse_args() ymir_cfg = get_merged_config() cfg = Config.fromfile(args.config) - print(cfg) # modify mmdet config from file modify_mmcv_config(mmcv_cfg=cfg, ymir_cfg=ymir_cfg) diff --git a/det-mmdetection-tmi/training-template.yaml b/det-mmdetection-tmi/training-template.yaml index f04e51a..05b11b2 100644 --- a/det-mmdetection-tmi/training-template.yaml +++ b/det-mmdetection-tmi/training-template.yaml @@ -1,7 +1,7 @@ -shm_size: '32G' +shm_size: '128G' export_format: 'ark:raw' samples_per_gpu: 16 # batch size per gpu -workers_per_gpu: 8 +workers_per_gpu: 4 max_epochs: 100 config_file: 'configs/yolox/yolox_tiny_8x8_300e_coco.py' args_options: '' diff --git a/det-yolov4-tmi/mining/infer-template.yaml b/det-yolov4-tmi/mining/infer-template.yaml index dce6501..11c6502 100644 --- a/det-yolov4-tmi/mining/infer-template.yaml +++ b/det-yolov4-tmi/mining/infer-template.yaml @@ -14,7 +14,7 @@ write_result: True confidence_thresh: 0.1 nms_thresh: 0.45 max_boxes: 50 -# shm_size: '16G' +shm_size: '128G' # gpu_id: '' # model_params_path: [] # class_names: diff --git a/det-yolov4-tmi/mining/mining-template.yaml b/det-yolov4-tmi/mining/mining-template.yaml index aeee009..2ff8270 100644 --- a/det-yolov4-tmi/mining/mining-template.yaml +++ b/det-yolov4-tmi/mining/mining-template.yaml @@ -18,7 +18,7 @@ anchors: '12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 4 confidence_thresh: 0.1 nms_thresh: 0.45 max_boxes: 50 -# shm_size: '16G' +shm_size: '128G' # gpu_id: '0,1,2,3' # model_params_path: [] # task_id: cycle-node-mined-0 diff --git a/det-yolov4-tmi/training-template.yaml b/det-yolov4-tmi/training-template.yaml index
17810f6..bb276dc 100644 --- a/det-yolov4-tmi/training-template.yaml +++ b/det-yolov4-tmi/training-template.yaml @@ -6,7 +6,7 @@ max_batches: 20000 warmup_iterations: 1000 batch: 64 subdivisions: 64 -shm_size: '16G' +shm_size: '128G' export_format: 'ark:raw' # class_names: # - cat diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml index 9011fe6..485c8bb 100644 --- a/det-yolov5-tmi/mining-template.yaml +++ b/det-yolov5-tmi/mining-template.yaml @@ -15,3 +15,4 @@ iou_thres: 0.45 batch_size_per_gpu: 16 num_workers_per_gpu: 4 pin_memory: False +shm_size: 128G diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py index 6b94381..ad1e0d2 100644 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ b/det-yolov5-tmi/mining/ymir_infer.py @@ -12,13 +12,12 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from tqdm import tqdm -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - from mining.util import YmirDataset, load_image_file +from tqdm import tqdm from utils.general import scale_coords from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -124,10 +123,6 @@ def main() -> int: anns.append(ann) ymir_infer_result[img_file] = anns rw.write_infer_result(infer_result=ymir_infer_result) - - if LOCAL_RANK != -1: - print(f'rank: {RANK}, start destroy process group') - # dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index 8151a1b..dc7d5af 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -17,13 +17,12 @@ import torch.nn.functional as F import torch.utils.data as td from easydict import EasyDict as edict +from mining.util import YmirDataset, load_image_file from tqdm import tqdm +from utils.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config -from mining.util import YmirDataset, load_image_file -from utils.ymir_yolov5 import YmirYolov5 - LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -206,11 +205,6 @@ def main() -> int: for img_file, score in result.items(): ymir_mining_result.append((img_file, score)) rw.write_mining_result(mining_result=ymir_mining_result) - - if LOCAL_RANK != -1: - print(f'rank: {RANK}, start destroy process group') - # dist.destroy_process_group() - print(f'rank: {RANK}, finished destroy process group') return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py index 343a501..6dfb01d 100644 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -14,14 +14,13 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from tqdm import tqdm -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, update_consistency) +from tqdm import tqdm from utils.general import scale_coords from 
utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -184,10 +183,6 @@ def main() -> int: for img_file, score in result.items(): ymir_mining_result.append((img_file, score)) rw.write_mining_result(mining_result=ymir_mining_result) - - if LOCAL_RANK != -1: - print(f'rank: {RANK}, start destroy process group') - # dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 3c8f483..4ec6cc0 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -53,7 +53,7 @@ def _run_training(cfg: edict) -> None: # 2. training model epochs: int = int(cfg.param.epochs) batch_size_per_gpu: int = int(cfg.param.batch_size_per_gpu) - num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 8)) + num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) model: str = cfg.param.model img_size: int = int(cfg.param.img_size) save_period: int = int(cfg.param.save_period) diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml index daaf476..1cc4752 100644 --- a/det-yolov5-tmi/training-template.yaml +++ b/det-yolov5-tmi/training-template.yaml @@ -7,11 +7,11 @@ # pretrained_model_params: [] # class_names: [] -shm_size: '32G' +shm_size: '128G' export_format: 'ark:raw' model: 'yolov5s' batch_size_per_gpu: 16 -num_workers_per_gpu: 8 +num_workers_per_gpu: 4 epochs: 100 img_size: 640 opset: 11 diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py index e58c81d..c463ded 100644 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ b/det-yolov5-tmi/utils/ymir_yolov5.py @@ -1,8 +1,6 @@ """ utils function for ymir and yolov5 """ -import glob -import os import os.path as osp import shutil from typing import Any, List @@ -11,16 +9,14 @@ import torch import yaml from easydict import EasyDict as edict -from nptyping import NDArray, Shape, UInt8 -from packaging.version import Version -from ymir_exc import monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process - from models.common import DetectMultiBackend +from nptyping import NDArray, Shape, UInt8 from utils.augmentations import letterbox from utils.general import check_img_size, non_max_suppression, scale_coords from utils.torch_utils import select_device +from ymir_exc import monitor +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process BBOX = NDArray[Shape['*,4'], Any] CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] diff --git a/live-code-executor/img-man/training-template.yaml b/live-code-executor/img-man/training-template.yaml index 0ac8798..df87016 100644 --- a/live-code-executor/img-man/training-template.yaml +++ b/live-code-executor/img-man/training-template.yaml @@ -7,4 +7,4 @@ task_id: 'default-training-task' pretrained_model_params: [] class_names: [] export_format: 'ark:raw' -shm_size: '32G' +shm_size: '128G' From 59420c2abbafa15a07946c91b9b09eed42f8ce11 Mon Sep 17 00:00:00 2001 From: wxjf Date: Wed, 19 Oct 2022 15:38:50 +0800 Subject: [PATCH 143/204] add entropy,random for yolov5 --- det-yolov5-tmi/mining/mining_entropy.py | 82 +++++++++++++ det-yolov5-tmi/mining/ymir_mining_entropy.py | 123 +++++++++++++++++++ det-yolov5-tmi/mining/ymir_mining_random.py | 82 +++++++++++++ 3 
files changed, 287 insertions(+) create mode 100644 det-yolov5-tmi/mining/mining_entropy.py create mode 100644 det-yolov5-tmi/mining/ymir_mining_entropy.py create mode 100644 det-yolov5-tmi/mining/ymir_mining_random.py diff --git a/det-yolov5-tmi/mining/mining_entropy.py b/det-yolov5-tmi/mining/mining_entropy.py new file mode 100644 index 0000000..ecf9262 --- /dev/null +++ b/det-yolov5-tmi/mining/mining_entropy.py @@ -0,0 +1,82 @@ +""" +Consistency-based Active Learning for Object Detection CVPR 2022 workshop +official code: https://github.com/we1pingyu/CALD/blob/master/cald_train.py +""" +import sys +from typing import Dict, List, Tuple + +import cv2 +import numpy as np +from easydict import EasyDict as edict +from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate +from nptyping import NDArray +from scipy.stats import entropy +from tqdm import tqdm +from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5 +from ymir_exc import dataset_reader as dr +from ymir_exc import env, monitor +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process + +def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: + if len(result) > 0: + bboxes = result[:, :4].astype(np.int32) + conf = result[:, 4] + class_id = result[:, 5] + else: + bboxes = np.zeros(shape=(0, 4), dtype=np.int32) + conf = np.zeros(shape=(0, 1), dtype=np.float32) + class_id = np.zeros(shape=(0, 1), dtype=np.int32) + + return bboxes, conf, class_id + +class MiningEntropy(YmirYolov5): + def __init__(self, cfg: edict): + super().__init__(cfg) + + if cfg.ymir.run_mining and cfg.ymir.run_infer: + # multiple task, run mining first, infer later + mining_task_idx = 0 + task_num = 2 + else: + mining_task_idx = 0 + task_num = 1 + + self.task_idx = mining_task_idx + self.task_num = task_num + + def mining(self) -> List: + N = dr.items_count(env.DatasetType.CANDIDATE) + monitor_gap = max(1, N // 1000) + idx = -1 + beta = 1.3 + mining_result = [] + for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): + img = cv2.imread(asset_path) + # xyxy,conf,cls + result = self.predict(img,nms=False) + bboxes, conf, _ = split_result(result) + if len(result) == 0: + # no result for the image without augmentation + mining_result.append((asset_path, -10)) + continue + mining_result.append((asset_path,-np.sum(conf*np.log2(conf)))) + idx += 1 + if idx % monitor_gap == 0: + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, + task_idx=self.task_idx, task_num=self.task_num) + monitor.write_monitor_logger(percent=percent) + + return mining_result + +def main(): + cfg = get_merged_config() + miner = MiningEntropy(cfg) + mining_result = miner.mining() + rw.write_mining_result(mining_result=mining_result) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/det-yolov5-tmi/mining/ymir_mining_entropy.py b/det-yolov5-tmi/mining/ymir_mining_entropy.py new file mode 100644 index 0000000..9ccc01e --- /dev/null +++ b/det-yolov5-tmi/mining/ymir_mining_entropy.py @@ -0,0 +1,123 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. infer on the origin dataset +3. infer on the augmentation dataset +4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` +5. 
merge mining result +""" +import os +import sys +from functools import partial + +import numpy as np +import torch +import torch.distributed as dist +import torch.utils.data as td +from easydict import EasyDict as edict +from tqdm import tqdm +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, + update_consistency) +from utils.general import scale_coords +from utils.ymir_yolov5 import YmirYolov5 + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu + gpu_count: int = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 + num_workers_per_gpu = min([ + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + ymir_yolov5.num_workers_per_gpu + ]) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu + # origin dataset + images_rank = images[RANK::WORLD_SIZE] + origin_dataset = YmirDataset(images_rank, load_fn=load_fn) + origin_dataset_loader = td.DataLoader(origin_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + num_workers=num_workers_per_gpu, + pin_memory=ymir_yolov5.pin_memory, + drop_last=False) + + results = [] + mining_results = dict() + beta = 1.3 + dataset_size = len(images_rank) + pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1 and idx < max_barrier_times: + dist.barrier() + + with torch.no_grad(): + pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=False) + + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + preprocess_image_shape = batch['image'].shape[2:] + for inner_idx, det in enumerate(pred): # per image + result_per_image = [] + image_file = batch['image_file'][inner_idx] + if len(det): + conf = det[:, 4].data.cpu().numpy() + mining_results[image_file] = -np.sum(conf*np.log2(conf)) + else: + mining_results[image_file] = -10 + continue + + torch.save(mining_results, f'/out/mining_results_{RANK}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + if LOCAL_RANK != -1: + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + results.append(torch.load(f'/out/mining_results_{rank}.pt')) + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + 
ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + + if LOCAL_RANK != -1: + print(f'rank: {RANK}, start destroy process group') + # dist.destroy_process_group() + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/det-yolov5-tmi/mining/ymir_mining_random.py b/det-yolov5-tmi/mining/ymir_mining_random.py new file mode 100644 index 0000000..418e7f2 --- /dev/null +++ b/det-yolov5-tmi/mining/ymir_mining_random.py @@ -0,0 +1,82 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. infer on the origin dataset +3. infer on the augmentation dataset +4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` +5. merge mining result +""" +import os +import random +import sys +from functools import partial + +import numpy as np +import torch +import torch.distributed as dist +import torch.utils.data as td +from easydict import EasyDict as edict +from tqdm import tqdm +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, + update_consistency) +from utils.general import scale_coords +from utils.ymir_yolov5 import YmirYolov5 + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + images_rank = images[RANK::WORLD_SIZE] + mining_results=dict() + for image in images_rank: + mining_results[image] = random.random() + + torch.save(mining_results, f'/out/mining_results_{RANK}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + if LOCAL_RANK != -1: + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + results.append(torch.load(f'/out/mining_results_{rank}.pt')) + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + + if LOCAL_RANK != -1: + print(f'rank: {RANK}, start destroy process group') + # dist.destroy_process_group() + return 0 + + +if __name__ == '__main__': + sys.exit(main()) From d760d8dd3bf1b89ead99a906d95ed513de8a186e Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 19 Oct 2022 17:19:15 +0800 Subject: [PATCH 144/204] update readme --- README.MD | 4 +++- README_zh-CN.MD | 4 +++- det-demo-tmi/README.md | 6 ++---- docs/mining-images-overview.md | 13 +++++++++++++ .../official-docker-image.md | 0 5 files changed, 21 insertions(+), 6 deletions(-) create mode 100644 docs/mining-images-overview.md rename official-docker-image.md => docs/official-docker-image.md (100%) diff --git a/README.MD 
b/README.MD index 823419c..50ace8d 100644 --- a/README.MD +++ b/README.MD @@ -4,7 +4,9 @@ - [wiki](https://github.com/modelai/ymir-executor-fork/wiki) -- [ymir executor](./official-docker-image.md) +- [ymir executor](./docs/official-docker-image.md) + +- [ymir mining algorithm](./docs/mining-images-overview.md) ## overview diff --git a/README_zh-CN.MD b/README_zh-CN.MD index e1d9960..3579823 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -4,7 +4,9 @@ - [说明文档](https://github.com/modelai/ymir-executor-fork/wiki) -- [ymir镜像](./official-docker-image.md) +- [ymir镜像](./docs/official-docker-image.md) + +- [ymir 挖掘算法](./docs/mining-images-overview.md) ## 比较 diff --git a/det-demo-tmi/README.md b/det-demo-tmi/README.md index 715eb47..abccece 100644 --- a/det-demo-tmi/README.md +++ b/det-demo-tmi/README.md @@ -137,7 +137,7 @@ ymir 通过 mir train / mir mining / mir infer 命令启动镜像,遵循以下 2. 镜像框架相关的所有内容都在 `ymir_exc` 包中,包括以下部分: - 安装方式 `pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0"`, 注意通过 `pip install ymir_exc` 的方式安装的版本不具有 `ymir_exc.util` 包。 + 安装方式 `pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0"`, 注意通过 ~~`pip install ymir_exc`~~ 的方式安装的版本不具有 `ymir_exc.util` 包。前者在后者的代码基础上进行了扩展,提供了更多的功能(如 `ymir_exc.util`)。 * `env`:环境,提供任务类型,任务 id 等信息 @@ -198,9 +198,7 @@ for idx, line in enumerate(lines): * 例如,如果需要保存 stage_name 为 'epoch-5000' 的模型,则需要把这些模型文件保存到 `os.path.join(cfg.ymir.output.model_dir, 'epoch-5000')` 目录下 - * 之后,可以使用 `result_writer.write_model_stage()` 方法保存训练结果的摘要,这些内容包括:不带目录的模型名称列表,mAP. - - * 也可以使用 `util.write_ymir_training_result()` 方法保存训练结果,它的兼容性与容错性更好。 + * 推荐使用 `util.write_ymir_training_result()` 方法保存训练结果 (不带目录的模型名称列表,mAP等) ,它对 `result_writer.write_model_stage()` 进行了封装,兼容性与容错性更好。 * 需要保存的模型实际记录在`cfg.ymir.output.training_result_file`中,ymir将依据此文件进行文件打包,供用户下载、迭代训练及推理挖掘。 diff --git a/docs/mining-images-overview.md b/docs/mining-images-overview.md new file mode 100644 index 0000000..e9557c1 --- /dev/null +++ b/docs/mining-images-overview.md @@ -0,0 +1,13 @@ +# ymir mining images overview + +| docker images | random | cald | aldd | entropy | +| - | - | - | - | - | +| yolov5 | ✔️ | ✔️ | ✔️ | ✔️ | +| mmdetection | ✔️ | ✔️ | ✔️ | ❌ | +| yolov4 | ❌ | ✔️ | ✔️ | ❌ | +| yolov7 | ❌ | ❌ | ✔️ | ❌ | +| nanodet | ❌ | ❌ | ✔️ | ❌ | +| vidt |❌ | ✔️ | ❌ | ❌ | +| detectron2 | ❌ | ✔️ | ❌ | ❌ | + +view [ALBench: Active Learning Benchmark](https://github.com/modelai/ALBench) for detail diff --git a/official-docker-image.md b/docs/official-docker-image.md similarity index 100% rename from official-docker-image.md rename to docs/official-docker-image.md From 61910668e3736f9c38ac72d33b2246d8383f7d7b Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 09:35:04 +0800 Subject: [PATCH 145/204] add mining overview --- docs/mining-images-overview.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/mining-images-overview.md b/docs/mining-images-overview.md index e9557c1..cec5f86 100644 --- a/docs/mining-images-overview.md +++ b/docs/mining-images-overview.md @@ -11,3 +11,11 @@ | detectron2 | ❌ | ✔️ | ❌ | ❌ | view [ALBench: Active Learning Benchmark](https://github.com/modelai/ALBench) for detail + +## reference + +- entropy: `Multi-class active learning for image classification. CVPR 2009` + +- cald: `Consistency-based Active Learning for Object Detection. CVPR 2022 workshop` + +- aldd: `Active Learning for Deep Detection Neural Networks. 
ICCV 2019` From 5e605ec8d02eaac4533140dc4a8ca4c35832ed9f Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 11:13:38 +0800 Subject: [PATCH 146/204] merge multi-gpu and single gpu mining mode --- det-yolov5-tmi/mining/mining_cald.py | 162 ------------------- det-yolov5-tmi/mining/mining_entropy.py | 82 ---------- det-yolov5-tmi/mining/util.py | 23 ++- det-yolov5-tmi/mining/ymir_mining_aldd.py | 4 +- det-yolov5-tmi/mining/ymir_mining_cald.py | 2 +- det-yolov5-tmi/mining/ymir_mining_entropy.py | 21 +-- det-yolov5-tmi/mining/ymir_mining_random.py | 27 ++-- det-yolov5-tmi/start.py | 6 +- 8 files changed, 40 insertions(+), 287 deletions(-) delete mode 100644 det-yolov5-tmi/mining/mining_cald.py delete mode 100644 det-yolov5-tmi/mining/mining_entropy.py diff --git a/det-yolov5-tmi/mining/mining_cald.py b/det-yolov5-tmi/mining/mining_cald.py deleted file mode 100644 index ab458ff..0000000 --- a/det-yolov5-tmi/mining/mining_cald.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -Consistency-based Active Learning for Object Detection CVPR 2022 workshop -official code: https://github.com/we1pingyu/CALD/blob/master/cald_train.py -""" -import sys -from typing import Dict, List, Tuple - -import cv2 -import numpy as np -from easydict import EasyDict as edict -from nptyping import NDArray -from scipy.stats import entropy -from tqdm import tqdm -from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process - -from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate -from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5 - - -def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: - if len(result) > 0: - bboxes = result[:, :4].astype(np.int32) - conf = result[:, 4] - class_id = result[:, 5] - else: - bboxes = np.zeros(shape=(0, 4), dtype=np.int32) - conf = np.zeros(shape=(0, 1), dtype=np.float32) - class_id = np.zeros(shape=(0, 1), dtype=np.int32) - - return bboxes, conf, class_id - - -class MiningCald(YmirYolov5): - - def __init__(self, cfg: edict): - super().__init__(cfg) - - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # multiple task, run mining first, infer later - mining_task_idx = 0 - task_num = 2 - else: - mining_task_idx = 0 - task_num = 1 - - self.task_idx = mining_task_idx - self.task_num = task_num - - def mining(self) -> List: - N = dr.items_count(env.DatasetType.CANDIDATE) - monitor_gap = max(1, N // 1000) - idx = -1 - beta = 1.3 - mining_result = [] - for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): - img = cv2.imread(asset_path) - # xyxy,conf,cls - result = self.predict(img) - bboxes, conf, _ = split_result(result) - if len(result) == 0: - # no result for the image without augmentation - mining_result.append((asset_path, -beta)) - continue - - consistency = 0.0 - aug_bboxes_dict, aug_results_dict = self.aug_predict(img, bboxes) - for key in aug_results_dict: - # no result for the image with augmentation f'{key}' - if len(aug_results_dict[key]) == 0: - consistency += beta - continue - - bboxes_key, conf_key, _ = split_result(aug_results_dict[key]) - cls_scores_aug = 1 - conf_key - cls_scores = 1 - conf - - consistency_per_aug = 2.0 - ious = get_ious(bboxes_key, aug_bboxes_dict[key]) - aug_idxs = np.argmax(ious, axis=0) - for origin_idx, aug_idx in enumerate(aug_idxs): - max_iou = ious[aug_idx, origin_idx] - if max_iou == 0: - consistency_per_aug = min(consistency_per_aug, beta) - p = 
cls_scores_aug[aug_idx] - q = cls_scores[origin_idx] - m = (p + q) / 2. - js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m]) - if js < 0: - js = 0 - consistency_box = max_iou - consistency_cls = 0.5 * (conf[origin_idx] + conf_key[aug_idx]) * (1 - js) - consistency_per_inst = abs(consistency_box + consistency_cls - beta) - consistency_per_aug = min(consistency_per_aug, consistency_per_inst.item()) - - consistency += consistency_per_aug - - consistency /= len(aug_results_dict) - - mining_result.append((asset_path, consistency)) - idx += 1 - - if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, - p=idx / N, - task_idx=self.task_idx, - task_num=self.task_num) - monitor.write_monitor_logger(percent=percent) - - return mining_result - - def aug_predict(self, image: CV_IMAGE, bboxes: BBOX) -> Tuple[Dict[str, BBOX], Dict[str, NDArray]]: - """ - for different augmentation methods: flip, cutout, rotate and resize - augment the image and bbox and use model to predict them. - - return the predict result and augment bbox. - """ - aug_dict = dict(flip=horizontal_flip, cutout=cutout, rotate=rotate, resize=resize) - - aug_bboxes = dict() - aug_results = dict() - for key in aug_dict: - aug_img, aug_bbox = aug_dict[key](image, bboxes) - - aug_result = self.predict(aug_img) - aug_bboxes[key] = aug_bbox - aug_results[key] = aug_result - - return aug_bboxes, aug_results - - -def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray: - """ - args: - boxes1: np.array, (N, 4), xyxy - boxes2: np.array, (M, 4), xyxy - return: - iou: np.array, (N, M) - """ - area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) - area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) - iner_area = intersect(boxes1, boxes2) - area1 = area1.reshape(-1, 1).repeat(area2.shape[0], axis=1) - area2 = area2.reshape(1, -1).repeat(area1.shape[0], axis=0) - iou = iner_area / (area1 + area2 - iner_area + 1e-14) - return iou - - -def main(): - cfg = get_merged_config() - miner = MiningCald(cfg) - mining_result = miner.mining() - rw.write_mining_result(mining_result=mining_result) - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/det-yolov5-tmi/mining/mining_entropy.py b/det-yolov5-tmi/mining/mining_entropy.py deleted file mode 100644 index ecf9262..0000000 --- a/det-yolov5-tmi/mining/mining_entropy.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Consistency-based Active Learning for Object Detection CVPR 2022 workshop -official code: https://github.com/we1pingyu/CALD/blob/master/cald_train.py -""" -import sys -from typing import Dict, List, Tuple - -import cv2 -import numpy as np -from easydict import EasyDict as edict -from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate -from nptyping import NDArray -from scipy.stats import entropy -from tqdm import tqdm -from utils.ymir_yolov5 import BBOX, CV_IMAGE, YmirYolov5 -from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process - -def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: - if len(result) > 0: - bboxes = result[:, :4].astype(np.int32) - conf = result[:, 4] - class_id = result[:, 5] - else: - bboxes = np.zeros(shape=(0, 4), dtype=np.int32) - conf = np.zeros(shape=(0, 1), dtype=np.float32) - class_id = np.zeros(shape=(0, 1), dtype=np.int32) - - return bboxes, conf, class_id - -class MiningEntropy(YmirYolov5): - def 
__init__(self, cfg: edict): - super().__init__(cfg) - - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # multiple task, run mining first, infer later - mining_task_idx = 0 - task_num = 2 - else: - mining_task_idx = 0 - task_num = 1 - - self.task_idx = mining_task_idx - self.task_num = task_num - - def mining(self) -> List: - N = dr.items_count(env.DatasetType.CANDIDATE) - monitor_gap = max(1, N // 1000) - idx = -1 - beta = 1.3 - mining_result = [] - for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)): - img = cv2.imread(asset_path) - # xyxy,conf,cls - result = self.predict(img,nms=False) - bboxes, conf, _ = split_result(result) - if len(result) == 0: - # no result for the image without augmentation - mining_result.append((asset_path, -10)) - continue - mining_result.append((asset_path,-np.sum(conf*np.log2(conf)))) - idx += 1 - if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, - task_idx=self.task_idx, task_num=self.task_num) - monitor.write_monitor_logger(percent=percent) - - return mining_result - -def main(): - cfg = get_merged_config() - miner = MiningEntropy(cfg) - mining_result = miner.mining() - rw.write_mining_result(mining_result=mining_result) - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) \ No newline at end of file diff --git a/det-yolov5-tmi/mining/util.py b/det-yolov5-tmi/mining/util.py index 54ef5dd..c69343c 100644 --- a/det-yolov5-tmi/mining/util.py +++ b/det-yolov5-tmi/mining/util.py @@ -19,18 +19,35 @@ import cv2 import numpy as np import torch.utils.data as td +from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate +from nptyping import NDArray from scipy.stats import entropy from torch.utils.data._utils.collate import default_collate - -from mining.data_augment import cutout, horizontal_flip, resize, rotate -from mining.mining_cald import get_ious from utils.augmentations import letterbox +from utils.ymir_yolov5 import BBOX LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray: + """ + args: + boxes1: np.array, (N, 4), xyxy + boxes2: np.array, (M, 4), xyxy + return: + iou: np.array, (N, M) + """ + area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) + area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) + iner_area = intersect(boxes1, boxes2) + area1 = area1.reshape(-1, 1).repeat(area2.shape[0], axis=1) + area2 = area2.reshape(1, -1).repeat(area1.shape[0], axis=0) + iou = iner_area / (area1 + area2 - iner_area + 1e-14) + return iou + + def preprocess(img, img_size, stride): img1 = letterbox(img, img_size, stride=stride, auto=False)[0] diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py index dc7d5af..8d6a27c 100644 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/mining/ymir_mining_aldd.py @@ -174,7 +174,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): if RANK in [-1, 0]: ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - torch.save(mining_results, f'/out/mining_results_{RANK}.pt') + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') def main() -> int: @@ -191,9 +191,7 @@ def main() -> int: # wait all process to save the mining result if LOCAL_RANK != -1: - print(f'rank: {RANK}, sync start before merge') 
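The `get_ious` helper moved into `mining/util.py` above returns a full N x M IoU matrix. A self-contained numeric check that reproduces its arithmetic (the helper itself needs the repository's `intersect`, so the formula is inlined here):

```python
import numpy as np


def iou_matrix(boxes1, boxes2):
    # same arithmetic as mining/util.py:get_ious, inlined for illustration
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = np.maximum(boxes1[:, None, :2], boxes2[None, :, :2])   # top-left of intersection
    rb = np.minimum(boxes1[:, None, 2:], boxes2[None, :, 2:])   # bottom-right of intersection
    wh = np.clip(rb - lt, 0, None)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter + 1e-14)


b1 = np.array([[0., 0., 10., 10.]])
b2 = np.array([[5., 5., 15., 15.], [0., 0., 10., 10.]])
print(iou_matrix(b1, b2))  # -> [[0.14285714 1.]], i.e. 25/175 and 100/100
```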
dist.barrier() - print(f'rank: {RANK}, sync finished before merge') if RANK in [0, -1]: results = [] diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py index 6dfb01d..b4c6147 100644 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/mining/ymir_mining_cald.py @@ -155,7 +155,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): image_file = batch['image_file'][inner_idx] mining_results[image_file] = batch_consistency[inner_idx] - torch.save(mining_results, f'/out/mining_results_{RANK}.pt') + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') def main() -> int: diff --git a/det-yolov5-tmi/mining/ymir_mining_entropy.py b/det-yolov5-tmi/mining/ymir_mining_entropy.py index 9ccc01e..df5a1ff 100644 --- a/det-yolov5-tmi/mining/ymir_mining_entropy.py +++ b/det-yolov5-tmi/mining/ymir_mining_entropy.py @@ -14,15 +14,12 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict +from mining.util import YmirDataset, load_image_file from tqdm import tqdm +from utils.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config -from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, - update_consistency) -from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 - LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -58,9 +55,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pin_memory=ymir_yolov5.pin_memory, drop_last=False) - results = [] mining_results = dict() - beta = 1.3 dataset_size = len(images_rank) pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): @@ -73,18 +68,16 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): if RANK in [-1, 0]: ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - preprocess_image_shape = batch['image'].shape[2:] for inner_idx, det in enumerate(pred): # per image - result_per_image = [] image_file = batch['image_file'][inner_idx] if len(det): conf = det[:, 4].data.cpu().numpy() - mining_results[image_file] = -np.sum(conf*np.log2(conf)) + mining_results[image_file] = -np.sum(conf * np.log2(conf)) else: mining_results[image_file] = -10 continue - torch.save(mining_results, f'/out/mining_results_{RANK}.pt') + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') def main() -> int: @@ -99,7 +92,7 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - if LOCAL_RANK != -1: + if WORLD_SIZE > 1: dist.barrier() if RANK in [0, -1]: @@ -112,10 +105,6 @@ def main() -> int: for img_file, score in result.items(): ymir_mining_result.append((img_file, score)) rw.write_mining_result(mining_result=ymir_mining_result) - - if LOCAL_RANK != -1: - print(f'rank: {RANK}, start destroy process group') - # dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/mining/ymir_mining_random.py b/det-yolov5-tmi/mining/ymir_mining_random.py index 418e7f2..30fb099 100644 --- a/det-yolov5-tmi/mining/ymir_mining_random.py +++ b/det-yolov5-tmi/mining/ymir_mining_random.py @@ -8,22 +8,15 @@ import os import random import sys -from functools import partial -import numpy as np import torch import torch.distributed as dist -import 
torch.utils.data as td from easydict import EasyDict as edict from tqdm import tqdm +from utils.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config -from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, - update_consistency) -from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 - LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -34,16 +27,20 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 device = torch.device('cuda', gpu) ymir_yolov5.to(device) - + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] images_rank = images[RANK::WORLD_SIZE] - mining_results=dict() - for image in images_rank: + mining_results = dict() + dataset_size = len(images_rank) + pbar = tqdm(images_rank) if RANK == 0 else images_rank + for idx, image in enumerate(pbar): + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx / dataset_size) mining_results[image] = random.random() - torch.save(mining_results, f'/out/mining_results_{RANK}.pt') + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') def main() -> int: @@ -58,7 +55,7 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - if LOCAL_RANK != -1: + if WORLD_SIZE > 1: dist.barrier() if RANK in [0, -1]: @@ -71,10 +68,6 @@ def main() -> int: for img_file, score in result.items(): ymir_mining_result.append((img_file, score)) rw.write_mining_result(mining_result=ymir_mining_result) - - if LOCAL_RANK != -1: - print(f'rank: {RANK}, start destroy process group') - # dist.destroy_process_group() return 0 diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py index 4ec6cc0..6c82844 100644 --- a/det-yolov5-tmi/start.py +++ b/det-yolov5-tmi/start.py @@ -119,12 +119,12 @@ def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 mining_algorithm = cfg.param.get('mining_algorithm', 'aldd') - support_mining_algorithms = ['aldd', 'cald'] + support_mining_algorithms = ['aldd', 'cald', 'random', 'entropy'] if mining_algorithm not in support_mining_algorithms: raise Exception(f'unknown mining algorithm {mining_algorithm}, not in {support_mining_algorithms}') - if gpu_count <= 1 and mining_algorithm in ['cald']: - command = 'python3 mining/mining_cald.py' + if gpu_count <= 1: + command = f'python3 mining/ymir_mining_{mining_algorithm}.py' else: port = find_free_port() command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_mining_{mining_algorithm}.py' # noqa From 35a000eec3133669445ba7ce6911d86fc61dc7fe Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 11:22:27 +0800 Subject: [PATCH 147/204] mv files --- det-yolov5-tmi/ymir/README.md | 43 ++++ det-yolov5-tmi/ymir/docker/cuda102.dockerfile | 40 ++++ det-yolov5-tmi/ymir/docker/cuda111.dockerfile | 43 ++++ .../ymir/img-man/infer-template.yaml | 15 ++ .../ymir/img-man/mining-template.yaml | 18 ++ .../ymir/img-man/training-template.yaml | 22 ++ det-yolov5-tmi/ymir/mining/data_augment.py | 204 +++++++++++++++++ det-yolov5-tmi/ymir/mining/util.py | 149 +++++++++++++ det-yolov5-tmi/ymir/mining/ymir_infer.py | 130 
+++++++++ .../ymir/mining/ymir_mining_aldd.py | 210 ++++++++++++++++++ .../ymir/mining/ymir_mining_cald.py | 190 ++++++++++++++++ .../ymir/mining/ymir_mining_entropy.py | 112 ++++++++++ .../ymir/mining/ymir_mining_random.py | 75 +++++++ det-yolov5-tmi/ymir/start.py | 191 ++++++++++++++++ det-yolov5-tmi/ymir/ymir_yolov5.py | 187 ++++++++++++++++ 15 files changed, 1629 insertions(+) create mode 100644 det-yolov5-tmi/ymir/README.md create mode 100644 det-yolov5-tmi/ymir/docker/cuda102.dockerfile create mode 100644 det-yolov5-tmi/ymir/docker/cuda111.dockerfile create mode 100644 det-yolov5-tmi/ymir/img-man/infer-template.yaml create mode 100644 det-yolov5-tmi/ymir/img-man/mining-template.yaml create mode 100644 det-yolov5-tmi/ymir/img-man/training-template.yaml create mode 100644 det-yolov5-tmi/ymir/mining/data_augment.py create mode 100644 det-yolov5-tmi/ymir/mining/util.py create mode 100644 det-yolov5-tmi/ymir/mining/ymir_infer.py create mode 100644 det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py create mode 100644 det-yolov5-tmi/ymir/mining/ymir_mining_cald.py create mode 100644 det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py create mode 100644 det-yolov5-tmi/ymir/mining/ymir_mining_random.py create mode 100644 det-yolov5-tmi/ymir/start.py create mode 100644 det-yolov5-tmi/ymir/ymir_yolov5.py diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md new file mode 100644 index 0000000..6bf9151 --- /dev/null +++ b/det-yolov5-tmi/ymir/README.md @@ -0,0 +1,43 @@ +# yolov5-ymir readme +- [yolov5 readme](./README_yolov5.md) + +``` +docker build -t ymir/ymir-executor:ymir1.1.0-cuda102-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda102.dockerfile . + +docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda111.dockerfile . +``` + +## main change log + +- add `start.py` as the entry point for train/infer/mining + +- add `utils/ymir_yolov5.py` for useful functions + + - `get_merged_config()` merges the ymir path config `cfg.yaml` and the hyper-parameters `cfg.param` + + - `convert_ymir_to_yolov5()` generates the yolov5 dataset config file `data.yaml` + + - `write_ymir_training_result()` saves model weights, mAP and other files. + + - `get_weight_file()` gets the pretrained or initial weight file from the ymir system + +- modify `utils/datasets.py` for the ymir dataset format + +- modify `train.py` to monitor the training process + +- add `mining/data_augment.py` and `mining/mining_cald.py` for mining + +- add `training/infer/mining-template.yaml` for `/img-man/training/infer/mining-template.yaml` + +- add `cuda102/111.dockerfile`, remove the original `Dockerfile` + +- modify `requirements.txt` + +- other minor modifications to support onnx export. + +## new features + +- 2022/09/08: add the aldd active learning algorithm for the mining task.
[Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning) +- 2022/09/14: support changing the hyper-parameter `num_workers_per_gpu` +- 2022/09/16: support changing the activation function; see [rknn](https://github.com/airockchip/rknn_model_zoo/tree/main/models/vision/object_detection/yolov5-pytorch) +- 2022/10/09: fix dist.destroy_process_group() hang diff --git a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile new file mode 100644 index 0000000..0014b60 --- /dev/null +++ b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile @@ -0,0 +1,40 @@ +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime +# support YMIR=1.0.0, 1.1.0 or 1.2.0 +ARG YMIR="1.1.0" + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" +ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" +ENV LANG=C.UTF-8 +ENV YMIR_VERSION=${YMIR} + +# Install linux package +RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ + libgl1-mesa-glx libsm6 libxext6 libxrender-dev curl wget zip vim \ + build-essential ninja-build \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# install ymir-exc sdk +RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" + +# Copy file from host to docker and install requirements +COPY . /app +RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ + && pip install -r /app/requirements.txt + +# Download pretrained weight and font file +RUN cd /app && bash data/scripts/download_weights.sh \ + && mkdir -p /root/.config/Ultralytics \ + && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf + +# Make PYTHONPATH find local package +ENV PYTHONPATH=.
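Aside: the progress-reporting idiom used across these patches maps a task-local fraction `p` into the global [0, 1] range via `get_ymir_process` and writes it with `monitor.write_monitor_logger`, throttled to roughly a hundred writes per task. The two calls below are exactly the ones appearing in the diffs; the loop around them is a schematic stand-in:

```python
from ymir_exc import monitor
from ymir_exc.util import YmirStage, get_ymir_process

N = 1000                        # e.g. number of candidate images
monitor_gap = max(1, N // 100)  # throttle writes, as the miners do
for idx in range(N):
    if idx % monitor_gap == 0:
        percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N)
        monitor.write_monitor_logger(percent=percent)
```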
+ +WORKDIR /app +RUN echo "python3 /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/det-yolov5-tmi/ymir/img-man/infer-template.yaml b/det-yolov5-tmi/ymir/img-man/infer-template.yaml new file mode 100644 index 0000000..329887a --- /dev/null +++ b/det-yolov5-tmi/ymir/img-man/infer-template.yaml @@ -0,0 +1,15 @@ +# infer template for your executor app +# after build image, it should at /img-man/infer-template.yaml +# key: gpu_id, task_id, model_params_path, class_names should be preserved + +# gpu_id: '0' +# task_id: 'default-infer-task' +# model_params_path: [] +# class_names: [] + +img_size: 640 +conf_thres: 0.25 +iou_thres: 0.45 +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +pin_memory: False diff --git a/det-yolov5-tmi/ymir/img-man/mining-template.yaml b/det-yolov5-tmi/ymir/img-man/mining-template.yaml new file mode 100644 index 0000000..485c8bb --- /dev/null +++ b/det-yolov5-tmi/ymir/img-man/mining-template.yaml @@ -0,0 +1,18 @@ +# mining template for your executor app +# after build image, it should at /img-man/mining-template.yaml +# key: gpu_id, task_id, model_params_path, class_names should be preserved + +# gpu_id: '0' +# task_id: 'default-training-task' +# model_params_path: [] +# class_names: [] + +img_size: 640 +mining_algorithm: aldd +class_distribution_scores: '' # 1.0,1.0,0.1,0.2 +conf_thres: 0.25 +iou_thres: 0.45 +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +pin_memory: False +shm_size: 128G diff --git a/det-yolov5-tmi/ymir/img-man/training-template.yaml b/det-yolov5-tmi/ymir/img-man/training-template.yaml new file mode 100644 index 0000000..1cc4752 --- /dev/null +++ b/det-yolov5-tmi/ymir/img-man/training-template.yaml @@ -0,0 +1,22 @@ +# training template for your executor app +# after build image, it should at /img-man/training-template.yaml +# key: gpu_id, task_id, pretrained_model_params, class_names should be preserved + +# gpu_id: '0' +# task_id: 'default-training-task' +# pretrained_model_params: [] +# class_names: [] + +shm_size: '128G' +export_format: 'ark:raw' +model: 'yolov5s' +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +epochs: 100 +img_size: 640 +opset: 11 +args_options: '--exist-ok' +save_best_only: True # save the best weight file only +save_period: 10 +sync_bn: False # work for multi-gpu only +ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern diff --git a/det-yolov5-tmi/ymir/mining/data_augment.py b/det-yolov5-tmi/ymir/mining/data_augment.py new file mode 100644 index 0000000..cfafaa7 --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/data_augment.py @@ -0,0 +1,204 @@ +""" +data augmentations for CALD method, including horizontal_flip, rotate(5'), cutout +official code: https://github.com/we1pingyu/CALD/blob/master/cald/cald_helper.py +""" +import random +from typing import Any, List, Tuple + +import cv2 +import numpy as np +from nptyping import NDArray + +from utils.ymir_yolov5 import BBOX, CV_IMAGE + + +def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray: + ''' + Find intersection of every box combination between two sets of box + boxes1: bounding boxes 1, a tensor of dimensions (n1, 4) + boxes2: bounding boxes 2, a tensor of dimensions (n2, 4) + + Out: Intersection each of boxes1 with respect to each of boxes2, + a tensor of dimensions (n1, n2) + ''' + n1 = boxes1.shape[0] + n2 = boxes2.shape[0] + max_xy = np.minimum( + np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1), + np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0)) + + min_xy = 
np.maximum( + np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1), + np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0)) + inter = np.clip(max_xy - min_xy, a_min=0, a_max=None) # (n1, n2, 2) + return inter[:, :, 0] * inter[:, :, 1] # (n1, n2) + + +def horizontal_flip(image: CV_IMAGE, bbox: BBOX) \ + -> Tuple[CV_IMAGE, BBOX]: + """ + image: opencv image, [height,width,channels] + bbox: numpy.ndarray, [N,4] --> [x1,y1,x2,y2] + """ + image = image.copy() + + width = image.shape[1] + # Flip image horizontally + image = image[:, ::-1, :] + if len(bbox) > 0: + bbox = bbox.copy() + # Flip bbox horizontally + bbox[:, [0, 2]] = width - bbox[:, [2, 0]] + return image, bbox + + +def cutout(image: CV_IMAGE, + bbox: BBOX, + cut_num: int = 2, + fill_val: int = 0, + bbox_remove_thres: float = 0.4, + bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]: + ''' + Cutout augmentation + image: A PIL image + boxes: bounding boxes, a tensor of dimensions (#objects, 4) + labels: labels of object, a tensor of dimensions (#objects) + fill_val: Value filled in cut out + bbox_remove_thres: Theshold to remove bbox cut by cutout + + Out: new image, new_boxes, new_labels + ''' + image = image.copy() + bbox = bbox.copy() + + if len(bbox) == 0: + return image, bbox + + original_h, original_w, original_channel = image.shape + count = 0 + for _ in range(50): + # Random cutout size: [0.15, 0.5] of original dimension + cutout_size_h = random.uniform(0.05 * original_h, 0.2 * original_h) + cutout_size_w = random.uniform(0.05 * original_w, 0.2 * original_w) + + # Random position for cutout + left = random.uniform(0, original_w - cutout_size_w) + right = left + cutout_size_w + top = random.uniform(0, original_h - cutout_size_h) + bottom = top + cutout_size_h + cutout = np.array([[float(left), float(top), float(right), float(bottom)]]) + + # Calculate intersect between cutout and bounding boxes + overlap_size = intersect(cutout, bbox) + area_boxes = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1]) + ratio = overlap_size / (area_boxes + 1e-14) + # If all boxes have Iou greater than bbox_remove_thres, try again + if ratio.max() > bbox_remove_thres or ratio.max() < bbox_min_thres: + continue + + image[int(top):int(bottom), int(left):int(right), :] = fill_val + count += 1 + if count >= cut_num: + break + return image, bbox + + +def rotate(image: CV_IMAGE, bbox: BBOX, rot: float = 5) -> Tuple[CV_IMAGE, BBOX]: + image = image.copy() + bbox = bbox.copy() + h, w, c = image.shape + center = np.array([w / 2.0, h / 2.0]) + s = max(h, w) * 1.0 + trans = get_affine_transform(center, s, rot, [w, h]) + if len(bbox) > 0: + for i in range(bbox.shape[0]): + x1, y1 = affine_transform(bbox[i, :2], trans) + x2, y2 = affine_transform(bbox[i, 2:], trans) + x3, y3 = affine_transform(bbox[i, [2, 1]], trans) + x4, y4 = affine_transform(bbox[i, [0, 3]], trans) + bbox[i, :2] = [min(x1, x2, x3, x4), min(y1, y2, y3, y4)] + bbox[i, 2:] = [max(x1, x2, x3, x4), max(y1, y2, y3, y4)] + image = cv2.warpAffine(image, trans, (w, h), flags=cv2.INTER_LINEAR) + return image, bbox + + +def get_3rd_point(a: NDArray, b: NDArray) -> NDArray: + direct = a - b + return b + np.array([-direct[1], direct[0]], dtype=np.float32) + + +def get_dir(src_point: NDArray, rot_rad: float) -> List: + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + + src_result = [0, 0] + src_result[0] = src_point[0] * cs - src_point[1] * sn + src_result[1] = src_point[0] * sn + src_point[1] * cs + + return src_result + + +def transform_preds(coords: NDArray, center: NDArray, scale: Any, rot: 
float, output_size: List) -> NDArray: + trans = get_affine_transform(center, scale, rot, output_size, inv=True) + target_coords = affine_transform(coords, trans) + return target_coords + + +def get_affine_transform(center: NDArray, + scale: Any, + rot: float, + output_size: List, + shift: NDArray = np.array([0, 0], dtype=np.float32), + inv: bool = False) -> NDArray: + if not isinstance(scale, np.ndarray) and not isinstance(scale, list): + scale = np.array([scale, scale], dtype=np.float32) + + scale_tmp = scale + src_w = scale_tmp[0] + dst_w = output_size[0] + dst_h = output_size[1] + + rot_rad = np.pi * rot / 180 + src_dir = get_dir([0, src_w * -0.5], rot_rad) + dst_dir = np.array([0, dst_w * -0.5], np.float32) + + src = np.zeros((3, 2), dtype=np.float32) + dst = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale_tmp * shift + src[1, :] = center + src_dir + scale_tmp * shift + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir + + src[2:, :] = get_3rd_point(src[0, :], src[1, :]) + dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return trans + + +def affine_transform(pt: NDArray, t: NDArray) -> NDArray: + new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T + new_pt = np.dot(t, new_pt) + return new_pt[:2] + + +def resize(img: CV_IMAGE, boxes: BBOX, ratio: float = 0.8) -> Tuple[CV_IMAGE, BBOX]: + """ + ratio: <= 1.0 + """ + assert ratio <= 1.0, f'resize ratio {ratio} must <= 1.0' + + h, w, _ = img.shape + ow = int(w * ratio) + oh = int(h * ratio) + resize_img = cv2.resize(img, (ow, oh)) + new_img = np.zeros_like(img) + new_img[:oh, :ow] = resize_img + + if len(boxes) == 0: + return new_img, boxes + else: + return new_img, boxes * ratio diff --git a/det-yolov5-tmi/ymir/mining/util.py b/det-yolov5-tmi/ymir/mining/util.py new file mode 100644 index 0000000..c69343c --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/util.py @@ -0,0 +1,149 @@ +"""run.py: +img --(model)--> pred --(augmentation)--> (aug1_pred, aug2_pred, ..., augN_pred) +img --(augmentation)--> aug1_img --(model)--> pred1 +img --(augmentation)--> aug2_img --(model)--> pred2 +... +img --(augmentation)--> augN_img --(model)--> predN + +dataload(img) --(model)--> pred +dataload(img, pred) --(augmentation1)--> (aug1_img, aug1_pred) --(model)--> pred1 + +1. split dataset with DDP sampler +2. use DDP model to infer sampled dataloader +3. 
gather infer result + +""" +import os +from typing import Any, List + +import cv2 +import numpy as np +import torch.utils.data as td +from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate +from nptyping import NDArray +from scipy.stats import entropy +from torch.utils.data._utils.collate import default_collate +from utils.augmentations import letterbox +from utils.ymir_yolov5 import BBOX + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray: + """ + args: + boxes1: np.array, (N, 4), xyxy + boxes2: np.array, (M, 4), xyxy + return: + iou: np.array, (N, M) + """ + area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) + area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) + iner_area = intersect(boxes1, boxes2) + area1 = area1.reshape(-1, 1).repeat(area2.shape[0], axis=1) + area2 = area2.reshape(1, -1).repeat(area1.shape[0], axis=0) + iou = iner_area / (area1 + area2 - iner_area + 1e-14) + return iou + + +def preprocess(img, img_size, stride): + img1 = letterbox(img, img_size, stride=stride, auto=False)[0] + + # preprocess: convert data format + img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img1 = np.ascontiguousarray(img1) + # img1 = torch.from_numpy(img1).to(self.device) + + img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 + return img1 + + +def load_image_file(img_file: str, img_size, stride): + img = cv2.imread(img_file) + img1 = letterbox(img, img_size, stride=stride, auto=False)[0] + + # preprocess: convert data format + img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img1 = np.ascontiguousarray(img1) + # img1 = torch.from_numpy(img1).to(self.device) + + img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 + # img1.unsqueeze_(dim=0) # expand for batch dim + return dict(image=img1, origin_shape=img.shape[0:2], image_file=img_file) + # return img1 + + +def load_image_file_with_ann(image_info: dict, img_size, stride): + img_file = image_info['image_file'] + # xyxy(int) conf(float) class_index(int) + bboxes = image_info['results'][:, :4].astype(np.int32) + img = cv2.imread(img_file) + aug_dict = dict(flip=horizontal_flip, cutout=cutout, rotate=rotate, resize=resize) + + data = dict(image_file=img_file, origin_shape=img.shape[0:2]) + for key in aug_dict: + aug_img, aug_bbox = aug_dict[key](img, bboxes) + preprocess_aug_img = preprocess(aug_img, img_size, stride) + data[f'image_{key}'] = preprocess_aug_img + data[f'bboxes_{key}'] = aug_bbox + data[f'origin_shape_{key}'] = aug_img.shape[0:2] + + data.update(image_info) + return data + + +def collate_fn_with_fake_ann(batch): + new_batch = dict() + for key in ['flip', 'cutout', 'rotate', 'resize']: + new_batch[f'bboxes_{key}_list'] = [data[f'bboxes_{key}'] for data in batch] + + new_batch[f'image_{key}'] = default_collate([data[f'image_{key}'] for data in batch]) + + new_batch[f'origin_shape_{key}'] = default_collate([data[f'origin_shape_{key}'] for data in batch]) + + new_batch['results_list'] = [data['results'] for data in batch] + new_batch['image_file'] = [data['image_file'] for data in batch] + + return new_batch + + +def update_consistency(consistency, consistency_per_aug, beta, pred_bboxes_key, pred_conf_key, aug_bboxes_key, + aug_conf): + cls_scores_aug = 1 - pred_conf_key + cls_scores = 1 - aug_conf + + consistency_per_aug = 2.0 + ious = get_ious(pred_bboxes_key, aug_bboxes_key) 
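+    # ious has shape (n_pred, n_aug): for every augmented copy of an original box,
+    # argmax over axis 0 picks the prediction on the augmented image that overlaps it most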
+ aug_idxs = np.argmax(ious, axis=0) + for origin_idx, aug_idx in enumerate(aug_idxs): + max_iou = ious[aug_idx, origin_idx] + if max_iou == 0: + consistency_per_aug = min(consistency_per_aug, beta) + p = cls_scores_aug[aug_idx] + q = cls_scores[origin_idx] + m = (p + q) / 2. + js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m]) + if js < 0: + js = 0 + consistency_box = max_iou + consistency_cls = 0.5 * (aug_conf[origin_idx] + pred_conf_key[aug_idx]) * (1 - js) + consistency_per_inst = abs(consistency_box + consistency_cls - beta) + consistency_per_aug = min(consistency_per_aug, consistency_per_inst.item()) + + consistency += consistency_per_aug + return consistency + + +class YmirDataset(td.Dataset): + def __init__(self, images: List[Any], load_fn=None): + super().__init__() + self.images = images + self.load_fn = load_fn + + def __getitem__(self, index): + return self.load_fn(self.images[index]) + + def __len__(self): + return len(self.images) diff --git a/det-yolov5-tmi/ymir/mining/ymir_infer.py b/det-yolov5-tmi/ymir/mining/ymir_infer.py new file mode 100644 index 0000000..ad1e0d2 --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/ymir_infer.py @@ -0,0 +1,130 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. save splited result with `torch.save(results, f'results_{RANK}.pt')` +3. merge result +""" +import os +import sys +import warnings +from functools import partial + +import torch +import torch.distributed as dist +import torch.utils.data as td +from easydict import EasyDict as edict +from mining.util import YmirDataset, load_image_file +from tqdm import tqdm +from utils.general import scale_coords +from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. 
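+    # LOCAL_RANK is -1 when the script is not launched by torch.distributed;
+    # clamp to 0 so the single process uses the first GPU visible through CUDA_VISIBLE_DEVICES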
+ gpu = max(0, LOCAL_RANK) + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + batch_size_per_gpu = ymir_yolov5.batch_size_per_gpu + gpu_count = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 + num_workers_per_gpu = min([ + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + ymir_yolov5.num_workers_per_gpu + ]) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = len(images) // max(1, WORLD_SIZE) // batch_size_per_gpu + # origin dataset + images_rank = images[RANK::WORLD_SIZE] + origin_dataset = YmirDataset(images_rank, load_fn=load_fn) + origin_dataset_loader = td.DataLoader(origin_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + num_workers=num_workers_per_gpu, + pin_memory=ymir_yolov5.pin_memory, + drop_last=False) + + results = [] + dataset_size = len(images_rank) + monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) + pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1 and idx < max_barrier_times: + dist.barrier() + + with torch.no_grad(): + pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) + + if idx % monitor_gap == 0: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + + preprocess_image_shape = batch['image'].shape[2:] + for idx, det in enumerate(pred): # per image + result_per_image = [] + image_file = batch['image_file'][idx] + if len(det): + origin_image_shape = (batch['origin_shape'][0][idx], batch['origin_shape'][1][idx]) + # Rescale boxes from img_size to img size + det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() + result_per_image.append(det) + results.append(dict(image_file=image_file, result=result_per_image)) + + torch.save(results, f'/out/infer_results_{RANK}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + ymir_yolov5 = YmirYolov5(ymir_cfg, task='infer') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the infer result + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + results.append(torch.load(f'/out/infer_results_{rank}.pt')) + + ymir_infer_result = dict() + for result in results: + for img_data in result: + img_file = img_data['image_file'] + anns = [] + for each_det in img_data['result']: + each_det_np = each_det.data.cpu().numpy() + for i in range(each_det_np.shape[0]): + xmin, ymin, xmax, ymax, conf, cls = each_det_np[i, :6].tolist() + if conf < ymir_yolov5.conf_thres: + continue + if int(cls) >= len(ymir_yolov5.class_names): + warnings.warn(f'class index {int(cls)} out of range for {ymir_yolov5.class_names}') + continue + ann = rw.Annotation(class_name=ymir_yolov5.class_names[int(cls)], + score=conf, + box=rw.Box(x=int(xmin), y=int(ymin), w=int(xmax - xmin), + h=int(ymax - ymin))) + anns.append(ann) + ymir_infer_result[img_file] = anns + rw.write_infer_result(infer_result=ymir_infer_result) + return 0 + + +if __name__ == '__main__': + 
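+    # propagate main()'s return code so the distributed launcher can detect failures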
sys.exit(main()) diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py new file mode 100644 index 0000000..8d6a27c --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py @@ -0,0 +1,210 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. infer on the origin dataset +3. infer on the augmentation dataset +4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` +5. merge mining result +""" +import os +import sys +import warnings +from functools import partial +from typing import Any, List + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn.functional as F +import torch.utils.data as td +from easydict import EasyDict as edict +from mining.util import YmirDataset, load_image_file +from tqdm import tqdm +from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +class ALDD(object): + + def __init__(self, ymir_cfg: edict): + self.avg_pool_size = 9 + self.max_pool_size = 32 + self.avg_pool_pad = (self.avg_pool_size - 1) // 2 + + self.num_classes = len(ymir_cfg.param.class_names) + if ymir_cfg.param.get('class_distribution_scores', ''): + scores = [float(x.strip()) for x in ymir_cfg.param.class_distribution_scores.split(',')] + if len(scores) < self.num_classes: + warnings.warn('extend 1.0 to class_distribution_scores') + scores.extend([1.0] * (self.num_classes - len(scores))) + self.class_distribution_scores = np.array(scores[0:self.num_classes], dtype=np.float32) + else: + self.class_distribution_scores = np.array([1.0] * self.num_classes, dtype=np.float32) + + def calc_unc_val(self, heatmap: torch.Tensor) -> torch.Tensor: + # mean of entropy + ent = F.binary_cross_entropy(heatmap, heatmap, reduction='none') + avg_ent = F.avg_pool2d(ent, + kernel_size=self.avg_pool_size, + stride=1, + padding=self.avg_pool_pad, + count_include_pad=False) # N, 1, H, W + mean_of_entropy = torch.sum(avg_ent, dim=1, keepdim=True) # N, 1, H, W + + # entropy of mean + avg_heatmap = F.avg_pool2d(heatmap, + kernel_size=self.avg_pool_size, + stride=1, + padding=self.avg_pool_pad, + count_include_pad=False) # N, C, H, W + ent_avg = F.binary_cross_entropy(avg_heatmap, avg_heatmap, reduction='none') + entropy_of_mean = torch.sum(ent_avg, dim=1, keepdim=True) # N, 1, H, W + + uncertainty = entropy_of_mean - mean_of_entropy + unc = F.max_pool2d(uncertainty, + kernel_size=self.max_pool_size, + stride=self.max_pool_size, + padding=0, + ceil_mode=False) + + # aggregating + scores = torch.mean(unc, dim=(1, 2, 3)) # (N,) + return scores + + def compute_aldd_score(self, net_output: List[torch.Tensor], net_input_shape: Any): + """ + args: + imgs: list[np.array(H, W, C)] + returns: + scores: list of float + """ + if not isinstance(net_input_shape, (list, tuple)): + net_input_shape = (net_input_shape, net_input_shape) + + # CLASS_DISTRIBUTION_SCORE = np.array([1.0] * num_of_class) + scores_list = [] + + for feature_map in net_output: + feature_map.sigmoid_() + + for each_class_index in range(self.num_classes): + feature_map_list: List[torch.Tensor] = [] + + # each_output_feature_map: [bs, 3, h, w, 5 + num_classes] + for each_output_feature_map in net_output: + net_output_conf = 
each_output_feature_map[:, :, :, :, 4] + net_output_cls_mult_conf = net_output_conf * each_output_feature_map[:, :, :, :, 5 + each_class_index] + # feature_map_reshape: [bs, 3, h, w] + feature_map_reshape = F.interpolate(net_output_cls_mult_conf, + net_input_shape, + mode='bilinear', + align_corners=False) + feature_map_list.append(feature_map_reshape) + + # len(net_output) = 3 + # feature_map_concate: [bs, 9, h, w] + feature_map_concate = torch.cat(feature_map_list, 1) + # scores: [bs, 1] for each class + scores = self.calc_unc_val(feature_map_concate) + scores = scores.cpu().detach().numpy() + scores_list.append(scores) + + # total_scores: [bs, num_classes] + total_scores = np.stack(scores_list, axis=1) + total_scores = total_scores * self.class_distribution_scores + total_scores = np.sum(total_scores, axis=1) + + return total_scores + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu + gpu_count: int = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 + num_workers_per_gpu = min([ + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + ymir_yolov5.num_workers_per_gpu + ]) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu + + # origin dataset + if RANK != -1: + images_rank = images[RANK::WORLD_SIZE] + else: + images_rank = images + origin_dataset = YmirDataset(images_rank, load_fn=load_fn) + origin_dataset_loader = td.DataLoader(origin_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + num_workers=num_workers_per_gpu, + pin_memory=ymir_yolov5.pin_memory, + drop_last=False) + + mining_results = dict() + dataset_size = len(images_rank) + pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + miner = ALDD(ymir_cfg) + for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1 and idx < max_barrier_times: + dist.barrier() + + with torch.no_grad(): + featuremap_output = ymir_yolov5.model.model(batch['image'].float().to(device))[1] + unc_scores = miner.compute_aldd_score(featuremap_output, ymir_yolov5.img_size) + + for each_imgname, each_score in zip(batch["image_file"], unc_scores): + mining_results[each_imgname] = each_score + + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + # note select_device(gpu_id) will set os.environ['CUDA_VISIBLE_DEVICES'] to gpu_id + ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + if LOCAL_RANK != -1: + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + 
results.append(torch.load(f'/out/mining_results_{rank}.pt')) + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py new file mode 100644 index 0000000..b4c6147 --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py @@ -0,0 +1,190 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. infer on the origin dataset +3. infer on the augmentation dataset +4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` +5. merge mining result +""" +import os +import sys +from functools import partial + +import numpy as np +import torch +import torch.distributed as dist +import torch.utils.data as td +from easydict import EasyDict as edict +from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, + update_consistency) +from tqdm import tqdm +from utils.general import scale_coords +from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu + gpu_count: int = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 + num_workers_per_gpu = min([ + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + ymir_yolov5.num_workers_per_gpu + ]) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu + # origin dataset + images_rank = images[RANK::WORLD_SIZE] + origin_dataset = YmirDataset(images_rank, load_fn=load_fn) + origin_dataset_loader = td.DataLoader(origin_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + num_workers=num_workers_per_gpu, + pin_memory=ymir_yolov5.pin_memory, + drop_last=False) + + results = [] + mining_results = dict() + beta = 1.3 + dataset_size = len(images_rank) + pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1 and idx < max_barrier_times: + dist.barrier() + + with torch.no_grad(): + pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) + + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + preprocess_image_shape = batch['image'].shape[2:] + for inner_idx, det in enumerate(pred): # per image + result_per_image = [] + image_file = batch['image_file'][inner_idx] + if len(det): + origin_image_shape = (batch['origin_shape'][0][inner_idx], batch['origin_shape'][1][inner_idx]) + # Rescale boxes from 
img_size to img size + det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() + result_per_image.append(det) + else: + mining_results[image_file] = -beta + continue + + results_per_image = torch.cat(result_per_image, dim=0).data.cpu().numpy() + results.append(dict(image_file=image_file, origin_shape=origin_image_shape, results=results_per_image)) + + aug_load_fn = partial(load_image_file_with_ann, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + aug_dataset = YmirDataset(results, load_fn=aug_load_fn) + aug_dataset_loader = td.DataLoader(aug_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + collate_fn=collate_fn_with_fake_ann, + num_workers=num_workers_per_gpu, + pin_memory=ymir_yolov5.pin_memory, + drop_last=False) + + # cannot sync here!!! + dataset_size = len(results) + monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) + pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader + for idx, batch in enumerate(pbar): + if idx % monitor_gap == 0 and RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + + batch_consistency = [0.0 for _ in range(len(batch['image_file']))] + aug_keys = ['flip', 'cutout', 'rotate', 'resize'] + + pred_result = dict() + for key in aug_keys: + with torch.no_grad(): + pred_result[key] = ymir_yolov5.forward(batch[f'image_{key}'].float().to(device), nms=True) + + for inner_idx in range(len(batch['image_file'])): + for key in aug_keys: + preprocess_image_shape = batch[f'image_{key}'].shape[2:] + result_per_image = [] + det = pred_result[key][inner_idx] + if len(det) == 0: + # no result for the image with augmentation f'{key}' + batch_consistency[inner_idx] += beta + continue + + # prediction result from origin image + fake_ann = batch['results_list'][inner_idx] + # bboxes = fake_ann[:, :4].data.cpu().numpy().astype(np.int32) + conf = fake_ann[:, 4] + + # augmentated bbox from bboxes, aug_conf = conf + aug_bboxes_key = batch[f'bboxes_{key}_list'][inner_idx].astype(np.int32) + + origin_image_shape = (batch[f'origin_shape_{key}'][0][inner_idx], + batch[f'origin_shape_{key}'][1][inner_idx]) + + # Rescale boxes from img_size to img size + det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() + result_per_image.append(det) + + pred_bboxes_key = det[:, :4].data.cpu().numpy().astype(np.int32) + pred_conf_key = det[:, 4].data.cpu().numpy() + batch_consistency[inner_idx] = update_consistency(consistency=batch_consistency[inner_idx], + consistency_per_aug=2.0, + beta=beta, + pred_bboxes_key=pred_bboxes_key, + pred_conf_key=pred_conf_key, + aug_bboxes_key=aug_bboxes_key, + aug_conf=conf) + + for inner_idx in range(len(batch['image_file'])): + batch_consistency[inner_idx] /= len(aug_keys) + image_file = batch['image_file'][inner_idx] + mining_results[image_file] = batch_consistency[inner_idx] + + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + if LOCAL_RANK != -1: + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in 
range(WORLD_SIZE): + results.append(torch.load(f'/out/mining_results_{rank}.pt')) + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py new file mode 100644 index 0000000..df5a1ff --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py @@ -0,0 +1,112 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. infer on the origin dataset +3. infer on the augmentation dataset +4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` +5. merge mining result +""" +import os +import sys +from functools import partial + +import numpy as np +import torch +import torch.distributed as dist +import torch.utils.data as td +from easydict import EasyDict as edict +from mining.util import YmirDataset, load_image_file +from tqdm import tqdm +from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. + gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) + batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu + gpu_count: int = ymir_yolov5.gpu_count + cpu_count: int = os.cpu_count() or 1 + num_workers_per_gpu = min([ + cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, + ymir_yolov5.num_workers_per_gpu + ]) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu + # origin dataset + images_rank = images[RANK::WORLD_SIZE] + origin_dataset = YmirDataset(images_rank, load_fn=load_fn) + origin_dataset_loader = td.DataLoader(origin_dataset, + batch_size=batch_size_per_gpu, + shuffle=False, + sampler=None, + num_workers=num_workers_per_gpu, + pin_memory=ymir_yolov5.pin_memory, + drop_last=False) + + mining_results = dict() + dataset_size = len(images_rank) + pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + for idx, batch in enumerate(pbar): + # batch-level sync, avoid 30min time-out error + if LOCAL_RANK != -1 and idx < max_barrier_times: + dist.barrier() + + with torch.no_grad(): + pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=False) + + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + for inner_idx, det in enumerate(pred): # per image + image_file = batch['image_file'][inner_idx] + if len(det): + conf = det[:, 4].data.cpu().numpy() + mining_results[image_file] = -np.sum(conf * np.log2(conf)) + else: + mining_results[image_file] = -10 + continue + + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + ymir_yolov5 = 
YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + if WORLD_SIZE > 1: + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + results.append(torch.load(f'/out/mining_results_{rank}.pt')) + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py new file mode 100644 index 0000000..30fb099 --- /dev/null +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py @@ -0,0 +1,75 @@ +"""use fake DDP to infer +1. split data with `images_rank = images[RANK::WORLD_SIZE]` +2. infer on the origin dataset +3. infer on the augmentation dataset +4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` +5. merge mining result +""" +import os +import random +import sys + +import torch +import torch.distributed as dist +from easydict import EasyDict as edict +from tqdm import tqdm +from utils.ymir_yolov5 import YmirYolov5 +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): + # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. 
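+    # the model is moved to the device only to keep the mining pipeline uniform;
+    # random mining never runs a forward pass, each image gets a score drawn uniformly from [0, 1)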
+ gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 + device = torch.device('cuda', gpu) + ymir_yolov5.to(device) + + with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + images_rank = images[RANK::WORLD_SIZE] + mining_results = dict() + dataset_size = len(images_rank) + pbar = tqdm(images_rank) if RANK == 0 else images_rank + for idx, image in enumerate(pbar): + if RANK in [-1, 0]: + ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx / dataset_size) + mining_results[image] = random.random() + + torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') + + +def main() -> int: + ymir_cfg = get_merged_config() + ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + run(ymir_cfg, ymir_yolov5) + + # wait all process to save the mining result + if WORLD_SIZE > 1: + dist.barrier() + + if RANK in [0, -1]: + results = [] + for rank in range(WORLD_SIZE): + results.append(torch.load(f'/out/mining_results_{rank}.pt')) + + ymir_mining_result = [] + for result in results: + for img_file, score in result.items(): + ymir_mining_result.append((img_file, score)) + rw.write_mining_result(mining_result=ymir_mining_result) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/det-yolov5-tmi/ymir/start.py b/det-yolov5-tmi/ymir/start.py new file mode 100644 index 0000000..6c82844 --- /dev/null +++ b/det-yolov5-tmi/ymir/start.py @@ -0,0 +1,191 @@ +import logging +import os +import subprocess +import sys + +import cv2 +from easydict import EasyDict as edict +from models.experimental import attempt_download +from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file +from ymir_exc import dataset_reader as dr +from ymir_exc import env, monitor +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process + + +def start(cfg: edict) -> int: + logging.info(f'merged config: {cfg}') + + if cfg.ymir.run_training: + _run_training(cfg) + else: + if cfg.ymir.run_mining and cfg.ymir.run_infer: + # multiple task, run mining first, infer later + mining_task_idx = 0 + infer_task_idx = 1 + task_num = 2 + else: + mining_task_idx = 0 + infer_task_idx = 0 + task_num = 1 + + if cfg.ymir.run_mining: + _run_mining(cfg, mining_task_idx, task_num) + if cfg.ymir.run_infer: + _run_infer(cfg, infer_task_idx, task_num) + + return 0 + + +def _run_training(cfg: edict) -> None: + """ + function for training task + 1. convert dataset + 2. training model + 3. save model weight/hyperparameter/... to design directory + """ + # 1. convert dataset + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') + monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + + # 2. 
training model + epochs: int = int(cfg.param.epochs) + batch_size_per_gpu: int = int(cfg.param.batch_size_per_gpu) + num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) + model: str = cfg.param.model + img_size: int = int(cfg.param.img_size) + save_period: int = int(cfg.param.save_period) + save_best_only: bool = get_bool(cfg, key='save_best_only', default_value=True) + args_options: str = cfg.param.args_options + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 + batch_size: int = batch_size_per_gpu * max(1, gpu_count) + port: int = find_free_port() + sync_bn: bool = get_bool(cfg, key='sync_bn', default_value=False) + + weights = get_weight_file(cfg) + if not weights: + # download pretrained weight + weights = attempt_download(f'{model}.pt') + + models_dir = cfg.ymir.output.models_dir + project = os.path.dirname(models_dir) + name = os.path.basename(models_dir) + assert os.path.join(project, name) == models_dir + + commands = ['python3'] + device = gpu_id or 'cpu' + if gpu_count > 1: + commands.extend(f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split()) + + commands.extend([ + 'train.py', '--epochs', + str(epochs), '--batch-size', + str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', project, '--cfg', f'models/{model}.yaml', + '--name', name, '--weights', weights, '--img-size', + str(img_size), '--save-period', + str(save_period), '--device', device, + '--workers', str(num_workers_per_gpu) + ]) + + if save_best_only: + commands.append("--nosave") + + if gpu_count > 1 and sync_bn: + commands.append("--sync-bn") + + if args_options: + commands.extend(args_options.split()) + + logging.info(f'start training: {commands}') + + subprocess.run(commands, check=True) + monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) + + # if task done, write 100% percent log + monitor.write_monitor_logger(percent=1.0) + + +def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: + # generate data.yaml for mining + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 + + mining_algorithm = cfg.param.get('mining_algorithm', 'aldd') + support_mining_algorithms = ['aldd', 'cald', 'random', 'entropy'] + if mining_algorithm not in support_mining_algorithms: + raise Exception(f'unknown mining algorithm {mining_algorithm}, not in {support_mining_algorithms}') + + if gpu_count <= 1: + command = f'python3 mining/ymir_mining_{mining_algorithm}.py' + else: + port = find_free_port() + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_mining_{mining_algorithm}.py' # noqa + + logging.info(f'mining: {command}') + subprocess.run(command.split(), check=True) + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + + +def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: + # generate data.yaml for infer + out_dir = cfg.ymir.output.root_dir + convert_ymir_to_yolov5(cfg) + logging.info(f'generate {out_dir}/data.yaml') + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.PREPROCESS, 
p=1.0, task_idx=task_idx, task_num=task_num)) + + gpu_id: str = str(cfg.param.get('gpu_id', '0')) + gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 + + if gpu_count <= 1: + N = dr.items_count(env.DatasetType.CANDIDATE) + infer_result = dict() + model = YmirYolov5(cfg) + idx = -1 + + monitor_gap = max(1, N // 100) + for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): + img = cv2.imread(asset_path) + result = model.infer(img) + infer_result[asset_path] = result + idx += 1 + + if idx % monitor_gap == 0: + percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, task_idx=task_idx, task_num=task_num) + monitor.write_monitor_logger(percent=percent) + + rw.write_infer_result(infer_result=infer_result) + else: + port = find_free_port() + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_infer.py' # noqa + + logging.info(f'infer: {command}') + subprocess.run(command.split(), check=True) + + monitor.write_monitor_logger( + percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + + +if __name__ == '__main__': + logging.basicConfig(stream=sys.stdout, + format='%(levelname)-8s: [%(asctime)s] %(message)s', + datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) + + cfg = get_merged_config() + os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') + + # activation: relu + activation: str = cfg.param.get('activation', '') + if activation: + os.environ.setdefault('ACTIVATION', activation) + sys.exit(start(cfg)) diff --git a/det-yolov5-tmi/ymir/ymir_yolov5.py b/det-yolov5-tmi/ymir/ymir_yolov5.py new file mode 100644 index 0000000..c463ded --- /dev/null +++ b/det-yolov5-tmi/ymir/ymir_yolov5.py @@ -0,0 +1,187 @@ +""" +utils function for ymir and yolov5 +""" +import os.path as osp +import shutil +from typing import Any, List + +import numpy as np +import torch +import yaml +from easydict import EasyDict as edict +from models.common import DetectMultiBackend +from nptyping import NDArray, Shape, UInt8 +from utils.augmentations import letterbox +from utils.general import check_img_size, non_max_suppression, scale_coords +from utils.torch_utils import select_device +from ymir_exc import monitor +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process + +BBOX = NDArray[Shape['*,4'], Any] +CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] + + +def get_weight_file(cfg: edict) -> str: + """ + return the weight file path by priority + find weight file in cfg.param.model_params_path or cfg.param.model_params_path + """ + weight_files = get_weight_files(cfg, suffix=('.pt')) + # choose weight file by priority, best.pt > xxx.pt + for p in weight_files: + if p.endswith('best.pt'): + return p + + if len(weight_files) > 0: + return max(weight_files, key=osp.getctime) + + return "" + + +class YmirYolov5(torch.nn.Module): + """ + used for mining and inference to init detector and predict. 
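+    Wraps yolov5's DetectMultiBackend: predict() letterboxes a BGR image,
+    runs NMS and rescales boxes back to the original image; infer() converts
+    the detections into ymir annotations.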
+ """ + def __init__(self, cfg: edict, task='infer'): + super().__init__() + self.cfg = cfg + if cfg.ymir.run_mining and cfg.ymir.run_infer: + # multiple task, run mining first, infer later + if task == 'infer': + self.task_idx = 1 + elif task == 'mining': + self.task_idx = 0 + else: + raise Exception(f'unknown task {task}') + + self.task_num = 2 + else: + self.task_idx = 0 + self.task_num = 1 + + self.gpu_id: str = str(cfg.param.get('gpu_id', '0')) + device = select_device(self.gpu_id) # will set CUDA_VISIBLE_DEVICES=self.gpu_id + self.gpu_count: int = len(self.gpu_id.split(',')) if self.gpu_id else 0 + self.batch_size_per_gpu: int = int(cfg.param.get('batch_size_per_gpu', 4)) + self.num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) + self.pin_memory: bool = get_bool(cfg, 'pin_memory', False) + self.batch_size: int = self.batch_size_per_gpu * self.gpu_count + self.model = self.init_detector(device) + self.model.eval() + self.device = device + self.class_names: List[str] = cfg.param.class_names + self.stride = self.model.stride + self.conf_thres: float = float(cfg.param.conf_thres) + self.iou_thres: float = float(cfg.param.iou_thres) + + img_size = int(cfg.param.img_size) + imgsz = [img_size, img_size] + imgsz = check_img_size(imgsz, s=self.stride) + + self.model.warmup(imgsz=(1, 3, *imgsz), half=False) # warmup + self.img_size: List[int] = imgsz + + def extract_feats(self, x): + """ + return the feature maps before sigmoid for mining + """ + return self.model.model(x)[1] + + def forward(self, x, nms=False): + pred = self.model(x) + if not nms: + return pred + + pred = non_max_suppression(pred, + conf_thres=self.conf_thres, + iou_thres=self.iou_thres, + classes=None, # not filter class_idx + agnostic=False, + max_det=100) + return pred + + def init_detector(self, device: torch.device) -> DetectMultiBackend: + weights = get_weight_file(self.cfg) + + if not weights: + raise Exception("no weights file specified!") + + data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') + model = DetectMultiBackend( + weights=weights, + device=device, + dnn=False, # not use opencv dnn for onnx inference + data=data_yaml) # dataset.yaml path + + return model + + def predict(self, img: CV_IMAGE) -> NDArray: + """ + predict single image and return bbox information + img: opencv BGR, uint8 format + """ + # preprocess: padded resize + img1 = letterbox(img, self.img_size, stride=self.stride, auto=True)[0] + + # preprocess: convert data format + img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img1 = np.ascontiguousarray(img1) + img1 = torch.from_numpy(img1).to(self.device) + + img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 + img1.unsqueeze_(dim=0) # expand for batch dim + pred = self.forward(img1, nms=True) + + result = [] + for det in pred: + if len(det): + # Rescale boxes from img_size to img size + det[:, :4] = scale_coords(img1.shape[2:], det[:, :4], img.shape).round() + result.append(det) + + # xyxy, conf, cls + if len(result) > 0: + tensor_result = torch.cat(result, dim=0) + numpy_result = tensor_result.data.cpu().numpy() + else: + numpy_result = np.zeros(shape=(0, 6), dtype=np.float32) + + return numpy_result + + def infer(self, img: CV_IMAGE) -> List[rw.Annotation]: + anns = [] + result = self.predict(img) + + for i in range(result.shape[0]): + xmin, ymin, xmax, ymax, conf, cls = result[i, :6].tolist() + ann = rw.Annotation(class_name=self.class_names[int(cls)], + score=conf, + box=rw.Box(x=int(xmin), y=int(ymin), w=int(xmax - xmin), h=int(ymax - ymin))) + + 
anns.append(ann) + + return anns + + def write_monitor_logger(self, stage: YmirStage, p: float): + monitor.write_monitor_logger( + percent=get_ymir_process(stage=stage, p=p, task_idx=self.task_idx, task_num=self.task_num)) + + +def convert_ymir_to_yolov5(cfg: edict, out_dir: str = None): + """ + convert ymir format dataset to yolov5 format + generate data.yaml for training/mining/infer + """ + + out_dir = out_dir or cfg.ymir.output.root_dir + data = dict(path=out_dir, nc=len(cfg.param.class_names), names=cfg.param.class_names) + for split, prefix in zip(['train', 'val', 'test'], ['training', 'val', 'candidate']): + src_file = getattr(cfg.ymir.input, f'{prefix}_index_file') + if osp.exists(src_file): + shutil.copy(src_file, f'{out_dir}/{split}.tsv') + + data[split] = f'{split}.tsv' + + with open(osp.join(out_dir, 'data.yaml'), 'w') as fw: + fw.write(yaml.safe_dump(data)) From 2f36c1c3533ec062a8628e02c45f9d9c93258151 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 11:24:47 +0800 Subject: [PATCH 148/204] mv folder --- det-yolov5-tmi/README.md | 307 +++++++++++++++++-- det-yolov5-tmi/README_yolov5.md | 304 ------------------ det-yolov5-tmi/cuda102.dockerfile | 40 --- det-yolov5-tmi/cuda111.dockerfile | 43 --- det-yolov5-tmi/infer-template.yaml | 15 - det-yolov5-tmi/mining-template.yaml | 18 -- det-yolov5-tmi/mining/data_augment.py | 204 ------------ det-yolov5-tmi/mining/util.py | 149 --------- det-yolov5-tmi/mining/ymir_infer.py | 130 -------- det-yolov5-tmi/mining/ymir_mining_aldd.py | 210 ------------- det-yolov5-tmi/mining/ymir_mining_cald.py | 190 ------------ det-yolov5-tmi/mining/ymir_mining_entropy.py | 112 ------- det-yolov5-tmi/mining/ymir_mining_random.py | 75 ----- det-yolov5-tmi/start.py | 191 ------------ det-yolov5-tmi/training-template.yaml | 22 -- det-yolov5-tmi/utils/ymir_yolov5.py | 187 ----------- 16 files changed, 284 insertions(+), 1913 deletions(-) delete mode 100644 det-yolov5-tmi/README_yolov5.md delete mode 100644 det-yolov5-tmi/cuda102.dockerfile delete mode 100644 det-yolov5-tmi/cuda111.dockerfile delete mode 100644 det-yolov5-tmi/infer-template.yaml delete mode 100644 det-yolov5-tmi/mining-template.yaml delete mode 100644 det-yolov5-tmi/mining/data_augment.py delete mode 100644 det-yolov5-tmi/mining/util.py delete mode 100644 det-yolov5-tmi/mining/ymir_infer.py delete mode 100644 det-yolov5-tmi/mining/ymir_mining_aldd.py delete mode 100644 det-yolov5-tmi/mining/ymir_mining_cald.py delete mode 100644 det-yolov5-tmi/mining/ymir_mining_entropy.py delete mode 100644 det-yolov5-tmi/mining/ymir_mining_random.py delete mode 100644 det-yolov5-tmi/start.py delete mode 100644 det-yolov5-tmi/training-template.yaml delete mode 100644 det-yolov5-tmi/utils/ymir_yolov5.py diff --git a/det-yolov5-tmi/README.md b/det-yolov5-tmi/README.md index 6bf9151..b03a7c5 100644 --- a/det-yolov5-tmi/README.md +++ b/det-yolov5-tmi/README.md @@ -1,43 +1,304 @@ -# yolov5-ymir readme -- [yolov5 readme](./README_yolov5.md) +
+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
+
+## Documentation
+
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
+
+## Quick Start Examples
+
+### Install
+
+Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
+[**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
+
+```bash
+git clone https://github.com/ultralytics/yolov5  # clone
+cd yolov5
+pip install -r requirements.txt  # install
+```
+
+### Inference
+
+Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36).
+[Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
+YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
+
+```python
+import torch
+
+# Model
+model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # or yolov5m, yolov5l, yolov5x, custom
+
+# Images
+img = 'https://ultralytics.com/images/zidane.jpg'  # or file, Path, PIL, OpenCV, numpy, list
+
+# Inference
+results = model(img)
+
+# Results
+results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
+```
+
    +Inference with detect.py + +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. + +```bash +python detect.py --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` -## main change log +
+
+</details>
+
+<details>
+<summary>Training</summary>
+
+The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
+results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
+and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
+YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
+1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the
+largest `--batch-size` possible, or pass `--batch-size -1` for
+YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
+
+```bash
+python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128
+                                       yolov5s                                64
+                                       yolov5m                                40
+                                       yolov5l                                24
+                                       yolov5x                                16
+```
+
+</details>
+
+<details open>
+<summary>Tutorials</summary>
+
+* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED
+* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED
+* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW
+* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW
+* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
+* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW
+* [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
+* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
+* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
+* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
+* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
+* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW
+* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
+
+</details>
+
+## <div align="center">Environments</div>
+
+Get started in seconds with our verified environments. Click each icon below for details.
+
+<!-- environment icons -->
+
+## <div align="center">Integrations</div>
+
+<!-- integration icons -->
+
+|Weights and Biases|Roboflow ⭐ NEW|
+|:-:|:-:|
+|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
+
+## <div align="center">Why YOLOv5</div>
+
+<!-- YOLOv5-P5 640 benchmark figure -->
+
+<details>
+  <summary>YOLOv5-P5 640 Figure (click to expand)</summary>
+
+<!-- YOLOv5-P5 640 figure -->
+
+</details>
+<details>
+  <summary>Figure Notes (click to expand)</summary>
+
+* **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
+* **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
+* **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
+* **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
+</details>
+
+### Pretrained Checkpoints
+
+[assets]: https://github.com/ultralytics/yolov5/releases
+
+[TTA]: https://github.com/ultralytics/yolov5/issues/303
+
-- add `start.py` and `utils/ymir_yolov5.py` for train/infer/mining
-- add `utils/ymir_yolov5.py` for useful functions:
-  - `get_merged_config()` merges the ymir path config `cfg.ymir` and the hyper-parameters `cfg.param`
-  - `convert_ymir_to_yolov5()` generates the yolov5 dataset config file `data.yaml`
-  - `write_ymir_training_result()` saves model weights, mAP and other files
+|Model |size<br>(pixels) |mAP<sup>val</sup><br>0.5:0.95 |mAP<sup>val</sup><br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs<br>@640 (B)
+|---                |---  |---  |---  |---    |---    |---    |---    |---
+|[YOLOv5n][assets]  |640  |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
+|[YOLOv5s][assets]  |640  |37.4 |56.8 |98     |6.4    |0.9    |7.2    |16.5
+|[YOLOv5m][assets]  |640  |45.4 |64.1 |224    |8.2    |1.7    |21.2   |49.0
+|[YOLOv5l][assets]  |640  |49.0 |67.3 |430    |10.1   |2.7    |46.5   |109.1
+|[YOLOv5x][assets]  |640  |50.7 |68.9 |766    |12.1   |4.8    |86.7   |205.7
+|                   |     |     |     |       |       |       |       |
+|[YOLOv5n6][assets] |1280 |36.0 |54.4 |153    |8.1    |2.1    |3.2    |4.6
+|[YOLOv5s6][assets] |1280 |44.8 |63.7 |385    |8.2    |3.6    |16.8   |12.6
+|[YOLOv5m6][assets] |1280 |51.3 |69.3 |887    |11.1   |6.8    |35.7   |50.0
+|[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784   |15.8   |10.5   |76.8   |111.4
+|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |55.0<br>**55.8** |72.7<br>**72.7** |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
+
-  - `get_weight_file()` get pretrained weight or init weight file from ymir system
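The weight-resolution logic behind `get_weight_file()` is small enough to sketch; this mirrors what `start.py` (deleted further down in this patch) does before launching training, with `yolov5s.pt` as an example fallback:

```python
from models.experimental import attempt_download
from utils.ymir_yolov5 import get_weight_file
from ymir_exc.util import get_merged_config

cfg = get_merged_config()
weights = get_weight_file(cfg)  # pretrained/init weight registered in ymir, '' if none
if not weights:
    weights = attempt_download('yolov5s.pt')  # example: fall back to an upstream checkpoint
```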
+<details>
+  <summary>Table Notes (click to expand)</summary>
+
+* All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
+* **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
+* **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+* **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+
-- modify `utils/datasets.py` for the ymir dataset format
-- modify `train.py` for training-process monitoring
-- add `mining/data_augment.py` and `mining/mining_cald.py` for mining
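For the CALD miner referenced in the last bullet, each candidate image is scored by how consistent the predictions on augmented copies (flip, cutout, rotate, resize) are with the predictions on the original image; a condensed per-box sketch, following `update_consistency` in `mining/util.py` later in this patch (β = 1.3 there):

```python
from scipy.stats import entropy

def cald_box_score(iou: float, conf_orig: float, conf_aug: float, beta: float = 1.3) -> float:
    """Consistency for one matched box pair; images with low scores are worth labeling."""
    p, q = 1 - conf_aug, 1 - conf_orig                  # background probabilities
    m = (p + q) / 2.0                                   # Jensen-Shannon midpoint
    js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m])
    consistency_box = iou                               # localization agreement
    consistency_cls = 0.5 * (conf_orig + conf_aug) * (1 - max(js, 0.0))
    return abs(consistency_box + consistency_cls - beta)
```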
+
+</details>
+
-- add `training/infer/mining-template.yaml` for `/img-man/training/infer/mining-template.yaml`
+## <div align="center">Contribute</div>
+
-- add `cuda102.dockerfile` and `cuda111.dockerfile`, remove the origin `Dockerfile`
+We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
+
-- modify `requirements.txt`
+<!-- contributors image -->
+
+## <div align="center">Contact</div>
+
-- other modifications support ONNX export; not important
+For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or
+professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
+
-## new features
-- 2022/09/08: add the aldd active-learning algorithm for the mining task. [Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning)
-- 2022/09/14: support changing the hyper-parameter `num_workers_per_gpu`
-- 2022/09/16: support changing the activation function, see [rknn](https://github.com/airockchip/rknn_model_zoo/tree/main/models/vision/object_detection/yolov5-pytorch)
-- 2022/10/09: fix dist.destroy_process_group() hang
+

diff --git a/det-yolov5-tmi/README_yolov5.md b/det-yolov5-tmi/README_yolov5.md
deleted file mode 100644
index b03a7c5..0000000
--- a/det-yolov5-tmi/README_yolov5.md
+++ /dev/null
@@ -1,304 +0,0 @@
-<div align="center">
-<!-- logo and badges: CI CPU testing, YOLOv5 Citation, Docker Pulls, Open In Colab, Open In Kaggle, Join Forum -->
-</div>
-
-YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics
-open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
-
-<!-- social media icons -->
-
-## <div align="center">Documentation</div>
-
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
-
-## <div align="center">Quick Start Examples</div>
-
-<details open>
-<summary>Install</summary>
-
-Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
-[**Python>=3.7.0**](https://www.python.org/) environment, including
-[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
-
-```bash
-git clone https://github.com/ultralytics/yolov5  # clone
-cd yolov5
-pip install -r requirements.txt  # install
-```
-
-</details>
-
-<details>
-<summary>Inference</summary>
-
-Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
-
-```python
-import torch
-
-# Model
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # or yolov5m, yolov5l, yolov5x, custom
-
-# Images
-img = 'https://ultralytics.com/images/zidane.jpg'  # or file, Path, PIL, OpenCV, numpy, list
-
-# Inference
-results = model(img)
-
-# Results
-results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
-```
-
-</details>
-
-<details>
-<summary>Inference with detect.py</summary>
-
-`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
-the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
-
-```bash
-python detect.py --source 0  # webcam
-                 img.jpg     # image
-                 vid.mp4     # video
-                 path/       # directory
-                 path/*.jpg  # glob
-                 'https://youtu.be/Zgi9g1ksQHc'  # YouTube
-                 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
-```
-
-</details>
-
-<details>
-<summary>Training</summary>
-
-The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
-results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
-and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
-1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the
-largest `--batch-size` possible, or pass `--batch-size -1` for
-YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
-
-```bash
-python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128
-                                       yolov5s                                64
-                                       yolov5m                                40
-                                       yolov5l                                24
-                                       yolov5x                                16
-```
-
-</details>
-
-<details open>
-<summary>Tutorials</summary>
-
-* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED
-* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED
-* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW
-* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW
-* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
-* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW
-* [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
-* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
-* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
-* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
-* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
-* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW
-* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
-
-</details>
-
-## <div align="center">Environments</div>
-
-Get started in seconds with our verified environments. Click each icon below for details.
-
-<!-- environment icons -->
-
-## <div align="center">Integrations</div>
-
-<!-- integration icons -->
-
-|Weights and Biases|Roboflow ⭐ NEW|
-|:-:|:-:|
-|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
-
-## <div align="center">Why YOLOv5</div>
-
-<!-- YOLOv5-P5 640 benchmark figure -->
-
-<details>
-  <summary>YOLOv5-P5 640 Figure (click to expand)</summary>
-
-<!-- YOLOv5-P5 640 figure -->
-
-</details>
-<details>
-  <summary>Figure Notes (click to expand)</summary>
-
-* **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
-* **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
-* **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
-* **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
-</details>
-
-### Pretrained Checkpoints
-
-[assets]: https://github.com/ultralytics/yolov5/releases
-
-[TTA]: https://github.com/ultralytics/yolov5/issues/303
-
-|Model |size<br>(pixels) |mAP<sup>val</sup><br>0.5:0.95 |mAP<sup>val</sup><br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs<br>@640 (B)
-|---                |---  |---  |---  |---    |---    |---    |---    |---
-|[YOLOv5n][assets]  |640  |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
-|[YOLOv5s][assets]  |640  |37.4 |56.8 |98     |6.4    |0.9    |7.2    |16.5
-|[YOLOv5m][assets]  |640  |45.4 |64.1 |224    |8.2    |1.7    |21.2   |49.0
-|[YOLOv5l][assets]  |640  |49.0 |67.3 |430    |10.1   |2.7    |46.5   |109.1
-|[YOLOv5x][assets]  |640  |50.7 |68.9 |766    |12.1   |4.8    |86.7   |205.7
-|                   |     |     |     |       |       |       |       |
-|[YOLOv5n6][assets] |1280 |36.0 |54.4 |153    |8.1    |2.1    |3.2    |4.6
-|[YOLOv5s6][assets] |1280 |44.8 |63.7 |385    |8.2    |3.6    |16.8   |12.6
-|[YOLOv5m6][assets] |1280 |51.3 |69.3 |887    |11.1   |6.8    |35.7   |50.0
-|[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784   |15.8   |10.5   |76.8   |111.4
-|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |55.0<br>**55.8** |72.7<br>**72.7** |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
-
-<details>
-  <summary>Table Notes (click to expand)</summary>
-
-* All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
-* **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-* **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-* **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-
-</details>
-
-## <div align="center">Contribute</div>
-
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
-
-<!-- contributors image -->
-
-## <div align="center">Contact</div>
-
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or
-professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
-
    - - diff --git a/det-yolov5-tmi/cuda102.dockerfile b/det-yolov5-tmi/cuda102.dockerfile deleted file mode 100644 index 0014b60..0000000 --- a/det-yolov5-tmi/cuda102.dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -ARG PYTORCH="1.8.1" -ARG CUDA="10.2" -ARG CUDNN="7" - -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime -# support YMIR=1.0.0, 1.1.0 or 1.2.0 -ARG YMIR="1.1.0" - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" -ENV LANG=C.UTF-8 -ENV YMIR_VERSION=${YMIR} - -# Install linux package -RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ - libgl1-mesa-glx libsm6 libxext6 libxrender-dev curl wget zip vim \ - build-essential ninja-build \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# install ymir-exc sdk -RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" - -# Copy file from host to docker and install requirements -COPY . /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ - && pip install -r /app/requirements.txt - -# Download pretrained weight and font file -RUN cd /app && bash data/scripts/download_weights.sh \ - && mkdir -p /root/.config/Ultralytics \ - && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf - -# Make PYTHONPATH find local package -ENV PYTHONPATH=. - -WORKDIR /app -RUN echo "python3 /app/start.py" > /usr/bin/start.sh -CMD bash /usr/bin/start.sh diff --git a/det-yolov5-tmi/cuda111.dockerfile b/det-yolov5-tmi/cuda111.dockerfile deleted file mode 100644 index 84427a8..0000000 --- a/det-yolov5-tmi/cuda111.dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -ARG PYTORCH="1.8.0" -ARG CUDA="11.1" -ARG CUDNN="8" - -# cuda11.1 + pytorch 1.9.0 + cudnn8 not work!!! -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-runtime -# support YMIR=1.0.0, 1.1.0 or 1.2.0 -ARG YMIR="1.1.0" - - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" -ENV LANG=C.UTF-8 -ENV YMIR_VERSION=$YMIR - -# Install linux package -RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ - libgl1-mesa-glx libsm6 libxext6 libxrender-dev curl wget zip vim \ - build-essential ninja-build \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -COPY ./requirements.txt /workspace/ -# install ymir-exc sdk and requirements -RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ - && pip install -r /workspace/requirements.txt - -# Copy file from host to docker and install requirements -COPY . /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ - -# Download pretrained weight and font file -RUN cd /app && bash data/scripts/download_weights.sh \ - && mkdir -p /root/.config/Ultralytics \ - && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf - -# Make PYTHONPATH find local package -ENV PYTHONPATH=. 
- -WORKDIR /app -RUN echo "python3 /app/start.py" > /usr/bin/start.sh -CMD bash /usr/bin/start.sh diff --git a/det-yolov5-tmi/infer-template.yaml b/det-yolov5-tmi/infer-template.yaml deleted file mode 100644 index 329887a..0000000 --- a/det-yolov5-tmi/infer-template.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# infer template for your executor app -# after build image, it should at /img-man/infer-template.yaml -# key: gpu_id, task_id, model_params_path, class_names should be preserved - -# gpu_id: '0' -# task_id: 'default-infer-task' -# model_params_path: [] -# class_names: [] - -img_size: 640 -conf_thres: 0.25 -iou_thres: 0.45 -batch_size_per_gpu: 16 -num_workers_per_gpu: 4 -pin_memory: False diff --git a/det-yolov5-tmi/mining-template.yaml b/det-yolov5-tmi/mining-template.yaml deleted file mode 100644 index 485c8bb..0000000 --- a/det-yolov5-tmi/mining-template.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# mining template for your executor app -# after build image, it should at /img-man/mining-template.yaml -# key: gpu_id, task_id, model_params_path, class_names should be preserved - -# gpu_id: '0' -# task_id: 'default-training-task' -# model_params_path: [] -# class_names: [] - -img_size: 640 -mining_algorithm: aldd -class_distribution_scores: '' # 1.0,1.0,0.1,0.2 -conf_thres: 0.25 -iou_thres: 0.45 -batch_size_per_gpu: 16 -num_workers_per_gpu: 4 -pin_memory: False -shm_size: 128G diff --git a/det-yolov5-tmi/mining/data_augment.py b/det-yolov5-tmi/mining/data_augment.py deleted file mode 100644 index cfafaa7..0000000 --- a/det-yolov5-tmi/mining/data_augment.py +++ /dev/null @@ -1,204 +0,0 @@ -""" -data augmentations for CALD method, including horizontal_flip, rotate(5'), cutout -official code: https://github.com/we1pingyu/CALD/blob/master/cald/cald_helper.py -""" -import random -from typing import Any, List, Tuple - -import cv2 -import numpy as np -from nptyping import NDArray - -from utils.ymir_yolov5 import BBOX, CV_IMAGE - - -def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray: - ''' - Find intersection of every box combination between two sets of box - boxes1: bounding boxes 1, a tensor of dimensions (n1, 4) - boxes2: bounding boxes 2, a tensor of dimensions (n2, 4) - - Out: Intersection each of boxes1 with respect to each of boxes2, - a tensor of dimensions (n1, n2) - ''' - n1 = boxes1.shape[0] - n2 = boxes2.shape[0] - max_xy = np.minimum( - np.expand_dims(boxes1[:, 2:], axis=1).repeat(n2, axis=1), - np.expand_dims(boxes2[:, 2:], axis=0).repeat(n1, axis=0)) - - min_xy = np.maximum( - np.expand_dims(boxes1[:, :2], axis=1).repeat(n2, axis=1), - np.expand_dims(boxes2[:, :2], axis=0).repeat(n1, axis=0)) - inter = np.clip(max_xy - min_xy, a_min=0, a_max=None) # (n1, n2, 2) - return inter[:, :, 0] * inter[:, :, 1] # (n1, n2) - - -def horizontal_flip(image: CV_IMAGE, bbox: BBOX) \ - -> Tuple[CV_IMAGE, BBOX]: - """ - image: opencv image, [height,width,channels] - bbox: numpy.ndarray, [N,4] --> [x1,y1,x2,y2] - """ - image = image.copy() - - width = image.shape[1] - # Flip image horizontally - image = image[:, ::-1, :] - if len(bbox) > 0: - bbox = bbox.copy() - # Flip bbox horizontally - bbox[:, [0, 2]] = width - bbox[:, [2, 0]] - return image, bbox - - -def cutout(image: CV_IMAGE, - bbox: BBOX, - cut_num: int = 2, - fill_val: int = 0, - bbox_remove_thres: float = 0.4, - bbox_min_thres: float = 0.1) -> Tuple[CV_IMAGE, BBOX]: - ''' - Cutout augmentation - image: A PIL image - boxes: bounding boxes, a tensor of dimensions (#objects, 4) - labels: labels of object, a tensor of dimensions (#objects) - fill_val: 
Value filled in cut out - bbox_remove_thres: Theshold to remove bbox cut by cutout - - Out: new image, new_boxes, new_labels - ''' - image = image.copy() - bbox = bbox.copy() - - if len(bbox) == 0: - return image, bbox - - original_h, original_w, original_channel = image.shape - count = 0 - for _ in range(50): - # Random cutout size: [0.15, 0.5] of original dimension - cutout_size_h = random.uniform(0.05 * original_h, 0.2 * original_h) - cutout_size_w = random.uniform(0.05 * original_w, 0.2 * original_w) - - # Random position for cutout - left = random.uniform(0, original_w - cutout_size_w) - right = left + cutout_size_w - top = random.uniform(0, original_h - cutout_size_h) - bottom = top + cutout_size_h - cutout = np.array([[float(left), float(top), float(right), float(bottom)]]) - - # Calculate intersect between cutout and bounding boxes - overlap_size = intersect(cutout, bbox) - area_boxes = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1]) - ratio = overlap_size / (area_boxes + 1e-14) - # If all boxes have Iou greater than bbox_remove_thres, try again - if ratio.max() > bbox_remove_thres or ratio.max() < bbox_min_thres: - continue - - image[int(top):int(bottom), int(left):int(right), :] = fill_val - count += 1 - if count >= cut_num: - break - return image, bbox - - -def rotate(image: CV_IMAGE, bbox: BBOX, rot: float = 5) -> Tuple[CV_IMAGE, BBOX]: - image = image.copy() - bbox = bbox.copy() - h, w, c = image.shape - center = np.array([w / 2.0, h / 2.0]) - s = max(h, w) * 1.0 - trans = get_affine_transform(center, s, rot, [w, h]) - if len(bbox) > 0: - for i in range(bbox.shape[0]): - x1, y1 = affine_transform(bbox[i, :2], trans) - x2, y2 = affine_transform(bbox[i, 2:], trans) - x3, y3 = affine_transform(bbox[i, [2, 1]], trans) - x4, y4 = affine_transform(bbox[i, [0, 3]], trans) - bbox[i, :2] = [min(x1, x2, x3, x4), min(y1, y2, y3, y4)] - bbox[i, 2:] = [max(x1, x2, x3, x4), max(y1, y2, y3, y4)] - image = cv2.warpAffine(image, trans, (w, h), flags=cv2.INTER_LINEAR) - return image, bbox - - -def get_3rd_point(a: NDArray, b: NDArray) -> NDArray: - direct = a - b - return b + np.array([-direct[1], direct[0]], dtype=np.float32) - - -def get_dir(src_point: NDArray, rot_rad: float) -> List: - sn, cs = np.sin(rot_rad), np.cos(rot_rad) - - src_result = [0, 0] - src_result[0] = src_point[0] * cs - src_point[1] * sn - src_result[1] = src_point[0] * sn + src_point[1] * cs - - return src_result - - -def transform_preds(coords: NDArray, center: NDArray, scale: Any, rot: float, output_size: List) -> NDArray: - trans = get_affine_transform(center, scale, rot, output_size, inv=True) - target_coords = affine_transform(coords, trans) - return target_coords - - -def get_affine_transform(center: NDArray, - scale: Any, - rot: float, - output_size: List, - shift: NDArray = np.array([0, 0], dtype=np.float32), - inv: bool = False) -> NDArray: - if not isinstance(scale, np.ndarray) and not isinstance(scale, list): - scale = np.array([scale, scale], dtype=np.float32) - - scale_tmp = scale - src_w = scale_tmp[0] - dst_w = output_size[0] - dst_h = output_size[1] - - rot_rad = np.pi * rot / 180 - src_dir = get_dir([0, src_w * -0.5], rot_rad) - dst_dir = np.array([0, dst_w * -0.5], np.float32) - - src = np.zeros((3, 2), dtype=np.float32) - dst = np.zeros((3, 2), dtype=np.float32) - src[0, :] = center + scale_tmp * shift - src[1, :] = center + src_dir + scale_tmp * shift - dst[0, :] = [dst_w * 0.5, dst_h * 0.5] - dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir - - src[2:, :] = 
get_3rd_point(src[0, :], src[1, :]) - dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) - - if inv: - trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) - else: - trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) - - return trans - - -def affine_transform(pt: NDArray, t: NDArray) -> NDArray: - new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T - new_pt = np.dot(t, new_pt) - return new_pt[:2] - - -def resize(img: CV_IMAGE, boxes: BBOX, ratio: float = 0.8) -> Tuple[CV_IMAGE, BBOX]: - """ - ratio: <= 1.0 - """ - assert ratio <= 1.0, f'resize ratio {ratio} must <= 1.0' - - h, w, _ = img.shape - ow = int(w * ratio) - oh = int(h * ratio) - resize_img = cv2.resize(img, (ow, oh)) - new_img = np.zeros_like(img) - new_img[:oh, :ow] = resize_img - - if len(boxes) == 0: - return new_img, boxes - else: - return new_img, boxes * ratio diff --git a/det-yolov5-tmi/mining/util.py b/det-yolov5-tmi/mining/util.py deleted file mode 100644 index c69343c..0000000 --- a/det-yolov5-tmi/mining/util.py +++ /dev/null @@ -1,149 +0,0 @@ -"""run.py: -img --(model)--> pred --(augmentation)--> (aug1_pred, aug2_pred, ..., augN_pred) -img --(augmentation)--> aug1_img --(model)--> pred1 -img --(augmentation)--> aug2_img --(model)--> pred2 -... -img --(augmentation)--> augN_img --(model)--> predN - -dataload(img) --(model)--> pred -dataload(img, pred) --(augmentation1)--> (aug1_img, aug1_pred) --(model)--> pred1 - -1. split dataset with DDP sampler -2. use DDP model to infer sampled dataloader -3. gather infer result - -""" -import os -from typing import Any, List - -import cv2 -import numpy as np -import torch.utils.data as td -from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate -from nptyping import NDArray -from scipy.stats import entropy -from torch.utils.data._utils.collate import default_collate -from utils.augmentations import letterbox -from utils.ymir_yolov5 import BBOX - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def get_ious(boxes1: BBOX, boxes2: BBOX) -> NDArray: - """ - args: - boxes1: np.array, (N, 4), xyxy - boxes2: np.array, (M, 4), xyxy - return: - iou: np.array, (N, M) - """ - area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) - area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) - iner_area = intersect(boxes1, boxes2) - area1 = area1.reshape(-1, 1).repeat(area2.shape[0], axis=1) - area2 = area2.reshape(1, -1).repeat(area1.shape[0], axis=0) - iou = iner_area / (area1 + area2 - iner_area + 1e-14) - return iou - - -def preprocess(img, img_size, stride): - img1 = letterbox(img, img_size, stride=stride, auto=False)[0] - - # preprocess: convert data format - img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img1 = np.ascontiguousarray(img1) - # img1 = torch.from_numpy(img1).to(self.device) - - img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 - return img1 - - -def load_image_file(img_file: str, img_size, stride): - img = cv2.imread(img_file) - img1 = letterbox(img, img_size, stride=stride, auto=False)[0] - - # preprocess: convert data format - img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img1 = np.ascontiguousarray(img1) - # img1 = torch.from_numpy(img1).to(self.device) - - img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 - # img1.unsqueeze_(dim=0) # expand for batch dim - return dict(image=img1, origin_shape=img.shape[0:2], image_file=img_file) - # 
return img1 - - -def load_image_file_with_ann(image_info: dict, img_size, stride): - img_file = image_info['image_file'] - # xyxy(int) conf(float) class_index(int) - bboxes = image_info['results'][:, :4].astype(np.int32) - img = cv2.imread(img_file) - aug_dict = dict(flip=horizontal_flip, cutout=cutout, rotate=rotate, resize=resize) - - data = dict(image_file=img_file, origin_shape=img.shape[0:2]) - for key in aug_dict: - aug_img, aug_bbox = aug_dict[key](img, bboxes) - preprocess_aug_img = preprocess(aug_img, img_size, stride) - data[f'image_{key}'] = preprocess_aug_img - data[f'bboxes_{key}'] = aug_bbox - data[f'origin_shape_{key}'] = aug_img.shape[0:2] - - data.update(image_info) - return data - - -def collate_fn_with_fake_ann(batch): - new_batch = dict() - for key in ['flip', 'cutout', 'rotate', 'resize']: - new_batch[f'bboxes_{key}_list'] = [data[f'bboxes_{key}'] for data in batch] - - new_batch[f'image_{key}'] = default_collate([data[f'image_{key}'] for data in batch]) - - new_batch[f'origin_shape_{key}'] = default_collate([data[f'origin_shape_{key}'] for data in batch]) - - new_batch['results_list'] = [data['results'] for data in batch] - new_batch['image_file'] = [data['image_file'] for data in batch] - - return new_batch - - -def update_consistency(consistency, consistency_per_aug, beta, pred_bboxes_key, pred_conf_key, aug_bboxes_key, - aug_conf): - cls_scores_aug = 1 - pred_conf_key - cls_scores = 1 - aug_conf - - consistency_per_aug = 2.0 - ious = get_ious(pred_bboxes_key, aug_bboxes_key) - aug_idxs = np.argmax(ious, axis=0) - for origin_idx, aug_idx in enumerate(aug_idxs): - max_iou = ious[aug_idx, origin_idx] - if max_iou == 0: - consistency_per_aug = min(consistency_per_aug, beta) - p = cls_scores_aug[aug_idx] - q = cls_scores[origin_idx] - m = (p + q) / 2. - js = 0.5 * entropy([p, 1 - p], [m, 1 - m]) + 0.5 * entropy([q, 1 - q], [m, 1 - m]) - if js < 0: - js = 0 - consistency_box = max_iou - consistency_cls = 0.5 * (aug_conf[origin_idx] + pred_conf_key[aug_idx]) * (1 - js) - consistency_per_inst = abs(consistency_box + consistency_cls - beta) - consistency_per_aug = min(consistency_per_aug, consistency_per_inst.item()) - - consistency += consistency_per_aug - return consistency - - -class YmirDataset(td.Dataset): - def __init__(self, images: List[Any], load_fn=None): - super().__init__() - self.images = images - self.load_fn = load_fn - - def __getitem__(self, index): - return self.load_fn(self.images[index]) - - def __len__(self): - return len(self.images) diff --git a/det-yolov5-tmi/mining/ymir_infer.py b/det-yolov5-tmi/mining/ymir_infer.py deleted file mode 100644 index ad1e0d2..0000000 --- a/det-yolov5-tmi/mining/ymir_infer.py +++ /dev/null @@ -1,130 +0,0 @@ -"""use fake DDP to infer -1. split data with `images_rank = images[RANK::WORLD_SIZE]` -2. save splited result with `torch.save(results, f'results_{RANK}.pt')` -3. 
merge result -""" -import os -import sys -import warnings -from functools import partial - -import torch -import torch.distributed as dist -import torch.utils.data as td -from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file -from tqdm import tqdm -from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): - # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. - gpu = max(0, LOCAL_RANK) - device = torch.device('cuda', gpu) - ymir_yolov5.to(device) - - load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) - batch_size_per_gpu = ymir_yolov5.batch_size_per_gpu - gpu_count = ymir_yolov5.gpu_count - cpu_count: int = os.cpu_count() or 1 - num_workers_per_gpu = min([ - cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, - ymir_yolov5.num_workers_per_gpu - ]) - - with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: - images = [line.strip() for line in f.readlines()] - - max_barrier_times = len(images) // max(1, WORLD_SIZE) // batch_size_per_gpu - # origin dataset - images_rank = images[RANK::WORLD_SIZE] - origin_dataset = YmirDataset(images_rank, load_fn=load_fn) - origin_dataset_loader = td.DataLoader(origin_dataset, - batch_size=batch_size_per_gpu, - shuffle=False, - sampler=None, - num_workers=num_workers_per_gpu, - pin_memory=ymir_yolov5.pin_memory, - drop_last=False) - - results = [] - dataset_size = len(images_rank) - monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader - for idx, batch in enumerate(pbar): - # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: - dist.barrier() - - with torch.no_grad(): - pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) - - if idx % monitor_gap == 0: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - - preprocess_image_shape = batch['image'].shape[2:] - for idx, det in enumerate(pred): # per image - result_per_image = [] - image_file = batch['image_file'][idx] - if len(det): - origin_image_shape = (batch['origin_shape'][0][idx], batch['origin_shape'][1][idx]) - # Rescale boxes from img_size to img size - det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() - result_per_image.append(det) - results.append(dict(image_file=image_file, result=result_per_image)) - - torch.save(results, f'/out/infer_results_{RANK}.pt') - - -def main() -> int: - ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='infer') - - if LOCAL_RANK != -1: - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - run(ymir_cfg, ymir_yolov5) - - # wait all process to save the infer result - dist.barrier() - - if RANK in [0, -1]: - results = [] - for rank in range(WORLD_SIZE): - results.append(torch.load(f'/out/infer_results_{rank}.pt')) - - ymir_infer_result = dict() - for result in results: - for img_data 
in result: - img_file = img_data['image_file'] - anns = [] - for each_det in img_data['result']: - each_det_np = each_det.data.cpu().numpy() - for i in range(each_det_np.shape[0]): - xmin, ymin, xmax, ymax, conf, cls = each_det_np[i, :6].tolist() - if conf < ymir_yolov5.conf_thres: - continue - if int(cls) >= len(ymir_yolov5.class_names): - warnings.warn(f'class index {int(cls)} out of range for {ymir_yolov5.class_names}') - continue - ann = rw.Annotation(class_name=ymir_yolov5.class_names[int(cls)], - score=conf, - box=rw.Box(x=int(xmin), y=int(ymin), w=int(xmax - xmin), - h=int(ymax - ymin))) - anns.append(ann) - ymir_infer_result[img_file] = anns - rw.write_infer_result(infer_result=ymir_infer_result) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/det-yolov5-tmi/mining/ymir_mining_aldd.py b/det-yolov5-tmi/mining/ymir_mining_aldd.py deleted file mode 100644 index 8d6a27c..0000000 --- a/det-yolov5-tmi/mining/ymir_mining_aldd.py +++ /dev/null @@ -1,210 +0,0 @@ -"""use fake DDP to infer -1. split data with `images_rank = images[RANK::WORLD_SIZE]` -2. infer on the origin dataset -3. infer on the augmentation dataset -4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` -5. merge mining result -""" -import os -import sys -import warnings -from functools import partial -from typing import Any, List - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn.functional as F -import torch.utils.data as td -from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file -from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -class ALDD(object): - - def __init__(self, ymir_cfg: edict): - self.avg_pool_size = 9 - self.max_pool_size = 32 - self.avg_pool_pad = (self.avg_pool_size - 1) // 2 - - self.num_classes = len(ymir_cfg.param.class_names) - if ymir_cfg.param.get('class_distribution_scores', ''): - scores = [float(x.strip()) for x in ymir_cfg.param.class_distribution_scores.split(',')] - if len(scores) < self.num_classes: - warnings.warn('extend 1.0 to class_distribution_scores') - scores.extend([1.0] * (self.num_classes - len(scores))) - self.class_distribution_scores = np.array(scores[0:self.num_classes], dtype=np.float32) - else: - self.class_distribution_scores = np.array([1.0] * self.num_classes, dtype=np.float32) - - def calc_unc_val(self, heatmap: torch.Tensor) -> torch.Tensor: - # mean of entropy - ent = F.binary_cross_entropy(heatmap, heatmap, reduction='none') - avg_ent = F.avg_pool2d(ent, - kernel_size=self.avg_pool_size, - stride=1, - padding=self.avg_pool_pad, - count_include_pad=False) # N, 1, H, W - mean_of_entropy = torch.sum(avg_ent, dim=1, keepdim=True) # N, 1, H, W - - # entropy of mean - avg_heatmap = F.avg_pool2d(heatmap, - kernel_size=self.avg_pool_size, - stride=1, - padding=self.avg_pool_pad, - count_include_pad=False) # N, C, H, W - ent_avg = F.binary_cross_entropy(avg_heatmap, avg_heatmap, reduction='none') - entropy_of_mean = torch.sum(ent_avg, dim=1, keepdim=True) # N, 1, H, W - - uncertainty = entropy_of_mean - mean_of_entropy - unc = F.max_pool2d(uncertainty, - kernel_size=self.max_pool_size, - stride=self.max_pool_size, - padding=0, - ceil_mode=False) - - # 
aggregating - scores = torch.mean(unc, dim=(1, 2, 3)) # (N,) - return scores - - def compute_aldd_score(self, net_output: List[torch.Tensor], net_input_shape: Any): - """ - args: - imgs: list[np.array(H, W, C)] - returns: - scores: list of float - """ - if not isinstance(net_input_shape, (list, tuple)): - net_input_shape = (net_input_shape, net_input_shape) - - # CLASS_DISTRIBUTION_SCORE = np.array([1.0] * num_of_class) - scores_list = [] - - for feature_map in net_output: - feature_map.sigmoid_() - - for each_class_index in range(self.num_classes): - feature_map_list: List[torch.Tensor] = [] - - # each_output_feature_map: [bs, 3, h, w, 5 + num_classes] - for each_output_feature_map in net_output: - net_output_conf = each_output_feature_map[:, :, :, :, 4] - net_output_cls_mult_conf = net_output_conf * each_output_feature_map[:, :, :, :, 5 + each_class_index] - # feature_map_reshape: [bs, 3, h, w] - feature_map_reshape = F.interpolate(net_output_cls_mult_conf, - net_input_shape, - mode='bilinear', - align_corners=False) - feature_map_list.append(feature_map_reshape) - - # len(net_output) = 3 - # feature_map_concate: [bs, 9, h, w] - feature_map_concate = torch.cat(feature_map_list, 1) - # scores: [bs, 1] for each class - scores = self.calc_unc_val(feature_map_concate) - scores = scores.cpu().detach().numpy() - scores_list.append(scores) - - # total_scores: [bs, num_classes] - total_scores = np.stack(scores_list, axis=1) - total_scores = total_scores * self.class_distribution_scores - total_scores = np.sum(total_scores, axis=1) - - return total_scores - - -def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): - # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. - gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 - device = torch.device('cuda', gpu) - ymir_yolov5.to(device) - - load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) - batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu - gpu_count: int = ymir_yolov5.gpu_count - cpu_count: int = os.cpu_count() or 1 - num_workers_per_gpu = min([ - cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, - ymir_yolov5.num_workers_per_gpu - ]) - - with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: - images = [line.strip() for line in f.readlines()] - - max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu - - # origin dataset - if RANK != -1: - images_rank = images[RANK::WORLD_SIZE] - else: - images_rank = images - origin_dataset = YmirDataset(images_rank, load_fn=load_fn) - origin_dataset_loader = td.DataLoader(origin_dataset, - batch_size=batch_size_per_gpu, - shuffle=False, - sampler=None, - num_workers=num_workers_per_gpu, - pin_memory=ymir_yolov5.pin_memory, - drop_last=False) - - mining_results = dict() - dataset_size = len(images_rank) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader - miner = ALDD(ymir_cfg) - for idx, batch in enumerate(pbar): - # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: - dist.barrier() - - with torch.no_grad(): - featuremap_output = ymir_yolov5.model.model(batch['image'].float().to(device))[1] - unc_scores = miner.compute_aldd_score(featuremap_output, ymir_yolov5.img_size) - - for each_imgname, each_score in zip(batch["image_file"], unc_scores): - mining_results[each_imgname] = each_score - - if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - - 
torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') - - -def main() -> int: - ymir_cfg = get_merged_config() - # note select_device(gpu_id) will set os.environ['CUDA_VISIBLE_DEVICES'] to gpu_id - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') - - if LOCAL_RANK != -1: - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - run(ymir_cfg, ymir_yolov5) - - # wait all process to save the mining result - if LOCAL_RANK != -1: - dist.barrier() - - if RANK in [0, -1]: - results = [] - for rank in range(WORLD_SIZE): - results.append(torch.load(f'/out/mining_results_{rank}.pt')) - - ymir_mining_result = [] - for result in results: - for img_file, score in result.items(): - ymir_mining_result.append((img_file, score)) - rw.write_mining_result(mining_result=ymir_mining_result) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/det-yolov5-tmi/mining/ymir_mining_cald.py b/det-yolov5-tmi/mining/ymir_mining_cald.py deleted file mode 100644 index b4c6147..0000000 --- a/det-yolov5-tmi/mining/ymir_mining_cald.py +++ /dev/null @@ -1,190 +0,0 @@ -"""use fake DDP to infer -1. split data with `images_rank = images[RANK::WORLD_SIZE]` -2. infer on the origin dataset -3. infer on the augmentation dataset -4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` -5. merge mining result -""" -import os -import sys -from functools import partial - -import numpy as np -import torch -import torch.distributed as dist -import torch.utils.data as td -from easydict import EasyDict as edict -from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, - update_consistency) -from tqdm import tqdm -from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): - # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. 
- gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 - device = torch.device('cuda', gpu) - ymir_yolov5.to(device) - - load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) - batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu - gpu_count: int = ymir_yolov5.gpu_count - cpu_count: int = os.cpu_count() or 1 - num_workers_per_gpu = min([ - cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, - ymir_yolov5.num_workers_per_gpu - ]) - - with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: - images = [line.strip() for line in f.readlines()] - - max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu - # origin dataset - images_rank = images[RANK::WORLD_SIZE] - origin_dataset = YmirDataset(images_rank, load_fn=load_fn) - origin_dataset_loader = td.DataLoader(origin_dataset, - batch_size=batch_size_per_gpu, - shuffle=False, - sampler=None, - num_workers=num_workers_per_gpu, - pin_memory=ymir_yolov5.pin_memory, - drop_last=False) - - results = [] - mining_results = dict() - beta = 1.3 - dataset_size = len(images_rank) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader - for idx, batch in enumerate(pbar): - # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: - dist.barrier() - - with torch.no_grad(): - pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) - - if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - preprocess_image_shape = batch['image'].shape[2:] - for inner_idx, det in enumerate(pred): # per image - result_per_image = [] - image_file = batch['image_file'][inner_idx] - if len(det): - origin_image_shape = (batch['origin_shape'][0][inner_idx], batch['origin_shape'][1][inner_idx]) - # Rescale boxes from img_size to img size - det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() - result_per_image.append(det) - else: - mining_results[image_file] = -beta - continue - - results_per_image = torch.cat(result_per_image, dim=0).data.cpu().numpy() - results.append(dict(image_file=image_file, origin_shape=origin_image_shape, results=results_per_image)) - - aug_load_fn = partial(load_image_file_with_ann, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) - aug_dataset = YmirDataset(results, load_fn=aug_load_fn) - aug_dataset_loader = td.DataLoader(aug_dataset, - batch_size=batch_size_per_gpu, - shuffle=False, - sampler=None, - collate_fn=collate_fn_with_fake_ann, - num_workers=num_workers_per_gpu, - pin_memory=ymir_yolov5.pin_memory, - drop_last=False) - - # cannot sync here!!! 
- dataset_size = len(results) - monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) - pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader - for idx, batch in enumerate(pbar): - if idx % monitor_gap == 0 and RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - - batch_consistency = [0.0 for _ in range(len(batch['image_file']))] - aug_keys = ['flip', 'cutout', 'rotate', 'resize'] - - pred_result = dict() - for key in aug_keys: - with torch.no_grad(): - pred_result[key] = ymir_yolov5.forward(batch[f'image_{key}'].float().to(device), nms=True) - - for inner_idx in range(len(batch['image_file'])): - for key in aug_keys: - preprocess_image_shape = batch[f'image_{key}'].shape[2:] - result_per_image = [] - det = pred_result[key][inner_idx] - if len(det) == 0: - # no result for the image with augmentation f'{key}' - batch_consistency[inner_idx] += beta - continue - - # prediction result from origin image - fake_ann = batch['results_list'][inner_idx] - # bboxes = fake_ann[:, :4].data.cpu().numpy().astype(np.int32) - conf = fake_ann[:, 4] - - # augmentated bbox from bboxes, aug_conf = conf - aug_bboxes_key = batch[f'bboxes_{key}_list'][inner_idx].astype(np.int32) - - origin_image_shape = (batch[f'origin_shape_{key}'][0][inner_idx], - batch[f'origin_shape_{key}'][1][inner_idx]) - - # Rescale boxes from img_size to img size - det[:, :4] = scale_coords(preprocess_image_shape, det[:, :4], origin_image_shape).round() - result_per_image.append(det) - - pred_bboxes_key = det[:, :4].data.cpu().numpy().astype(np.int32) - pred_conf_key = det[:, 4].data.cpu().numpy() - batch_consistency[inner_idx] = update_consistency(consistency=batch_consistency[inner_idx], - consistency_per_aug=2.0, - beta=beta, - pred_bboxes_key=pred_bboxes_key, - pred_conf_key=pred_conf_key, - aug_bboxes_key=aug_bboxes_key, - aug_conf=conf) - - for inner_idx in range(len(batch['image_file'])): - batch_consistency[inner_idx] /= len(aug_keys) - image_file = batch['image_file'][inner_idx] - mining_results[image_file] = batch_consistency[inner_idx] - - torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') - - -def main() -> int: - ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') - - if LOCAL_RANK != -1: - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - run(ymir_cfg, ymir_yolov5) - - # wait all process to save the mining result - if LOCAL_RANK != -1: - dist.barrier() - - if RANK in [0, -1]: - results = [] - for rank in range(WORLD_SIZE): - results.append(torch.load(f'/out/mining_results_{rank}.pt')) - - ymir_mining_result = [] - for result in results: - for img_file, score in result.items(): - ymir_mining_result.append((img_file, score)) - rw.write_mining_result(mining_result=ymir_mining_result) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/det-yolov5-tmi/mining/ymir_mining_entropy.py b/det-yolov5-tmi/mining/ymir_mining_entropy.py deleted file mode 100644 index df5a1ff..0000000 --- a/det-yolov5-tmi/mining/ymir_mining_entropy.py +++ /dev/null @@ -1,112 +0,0 @@ -"""use fake DDP to infer -1. split data with `images_rank = images[RANK::WORLD_SIZE]` -2. infer on the origin dataset -3. infer on the augmentation dataset -4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` -5. 
merge mining result -""" -import os -import sys -from functools import partial - -import numpy as np -import torch -import torch.distributed as dist -import torch.utils.data as td -from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file -from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): - # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. - gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 - device = torch.device('cuda', gpu) - ymir_yolov5.to(device) - - load_fn = partial(load_image_file, img_size=ymir_yolov5.img_size, stride=ymir_yolov5.stride) - batch_size_per_gpu: int = ymir_yolov5.batch_size_per_gpu - gpu_count: int = ymir_yolov5.gpu_count - cpu_count: int = os.cpu_count() or 1 - num_workers_per_gpu = min([ - cpu_count // max(gpu_count, 1), batch_size_per_gpu if batch_size_per_gpu > 1 else 0, - ymir_yolov5.num_workers_per_gpu - ]) - - with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: - images = [line.strip() for line in f.readlines()] - - max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu - # origin dataset - images_rank = images[RANK::WORLD_SIZE] - origin_dataset = YmirDataset(images_rank, load_fn=load_fn) - origin_dataset_loader = td.DataLoader(origin_dataset, - batch_size=batch_size_per_gpu, - shuffle=False, - sampler=None, - num_workers=num_workers_per_gpu, - pin_memory=ymir_yolov5.pin_memory, - drop_last=False) - - mining_results = dict() - dataset_size = len(images_rank) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader - for idx, batch in enumerate(pbar): - # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: - dist.barrier() - - with torch.no_grad(): - pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=False) - - if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) - for inner_idx, det in enumerate(pred): # per image - image_file = batch['image_file'][inner_idx] - if len(det): - conf = det[:, 4].data.cpu().numpy() - mining_results[image_file] = -np.sum(conf * np.log2(conf)) - else: - mining_results[image_file] = -10 - continue - - torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') - - -def main() -> int: - ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') - - if LOCAL_RANK != -1: - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - run(ymir_cfg, ymir_yolov5) - - # wait all process to save the mining result - if WORLD_SIZE > 1: - dist.barrier() - - if RANK in [0, -1]: - results = [] - for rank in range(WORLD_SIZE): - results.append(torch.load(f'/out/mining_results_{rank}.pt')) - - ymir_mining_result = [] - for result in results: - for img_file, score in result.items(): - ymir_mining_result.append((img_file, score)) - rw.write_mining_result(mining_result=ymir_mining_result) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/det-yolov5-tmi/mining/ymir_mining_random.py 
b/det-yolov5-tmi/mining/ymir_mining_random.py deleted file mode 100644 index 30fb099..0000000 --- a/det-yolov5-tmi/mining/ymir_mining_random.py +++ /dev/null @@ -1,75 +0,0 @@ -"""use fake DDP to infer -1. split data with `images_rank = images[RANK::WORLD_SIZE]` -2. infer on the origin dataset -3. infer on the augmentation dataset -4. save splited mining result with `torch.save(results, f'/out/mining_results_{RANK}.pt')` -5. merge mining result -""" -import os -import random -import sys - -import torch -import torch.distributed as dist -from easydict import EasyDict as edict -from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): - # eg: gpu_id = 1,3,5,7 for LOCAL_RANK = 2, will use gpu 5. - gpu = LOCAL_RANK if LOCAL_RANK >= 0 else 0 - device = torch.device('cuda', gpu) - ymir_yolov5.to(device) - - with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: - images = [line.strip() for line in f.readlines()] - - images_rank = images[RANK::WORLD_SIZE] - mining_results = dict() - dataset_size = len(images_rank) - pbar = tqdm(images_rank) if RANK == 0 else images_rank - for idx, image in enumerate(pbar): - if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx / dataset_size) - mining_results[image] = random.random() - - torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') - - -def main() -> int: - ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') - - if LOCAL_RANK != -1: - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - run(ymir_cfg, ymir_yolov5) - - # wait all process to save the mining result - if WORLD_SIZE > 1: - dist.barrier() - - if RANK in [0, -1]: - results = [] - for rank in range(WORLD_SIZE): - results.append(torch.load(f'/out/mining_results_{rank}.pt')) - - ymir_mining_result = [] - for result in results: - for img_file, score in result.items(): - ymir_mining_result.append((img_file, score)) - rw.write_mining_result(mining_result=ymir_mining_result) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/det-yolov5-tmi/start.py b/det-yolov5-tmi/start.py deleted file mode 100644 index 6c82844..0000000 --- a/det-yolov5-tmi/start.py +++ /dev/null @@ -1,191 +0,0 @@ -import logging -import os -import subprocess -import sys - -import cv2 -from easydict import EasyDict as edict -from models.experimental import attempt_download -from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file -from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process - - -def start(cfg: edict) -> int: - logging.info(f'merged config: {cfg}') - - if cfg.ymir.run_training: - _run_training(cfg) - else: - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # multiple task, run mining first, infer later - mining_task_idx = 0 - infer_task_idx = 1 - task_num = 2 - else: - mining_task_idx = 0 - infer_task_idx = 0 - task_num = 1 - - if 
cfg.ymir.run_mining: - _run_mining(cfg, mining_task_idx, task_num) - if cfg.ymir.run_infer: - _run_infer(cfg, infer_task_idx, task_num) - - return 0 - - -def _run_training(cfg: edict) -> None: - """ - function for training task - 1. convert dataset - 2. training model - 3. save model weight/hyperparameter/... to design directory - """ - # 1. convert dataset - out_dir = cfg.ymir.output.root_dir - convert_ymir_to_yolov5(cfg) - logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) - - # 2. training model - epochs: int = int(cfg.param.epochs) - batch_size_per_gpu: int = int(cfg.param.batch_size_per_gpu) - num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) - model: str = cfg.param.model - img_size: int = int(cfg.param.img_size) - save_period: int = int(cfg.param.save_period) - save_best_only: bool = get_bool(cfg, key='save_best_only', default_value=True) - args_options: str = cfg.param.args_options - gpu_id: str = str(cfg.param.get('gpu_id', '0')) - gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 - batch_size: int = batch_size_per_gpu * max(1, gpu_count) - port: int = find_free_port() - sync_bn: bool = get_bool(cfg, key='sync_bn', default_value=False) - - weights = get_weight_file(cfg) - if not weights: - # download pretrained weight - weights = attempt_download(f'{model}.pt') - - models_dir = cfg.ymir.output.models_dir - project = os.path.dirname(models_dir) - name = os.path.basename(models_dir) - assert os.path.join(project, name) == models_dir - - commands = ['python3'] - device = gpu_id or 'cpu' - if gpu_count > 1: - commands.extend(f'-m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port}'.split()) - - commands.extend([ - 'train.py', '--epochs', - str(epochs), '--batch-size', - str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', project, '--cfg', f'models/{model}.yaml', - '--name', name, '--weights', weights, '--img-size', - str(img_size), '--save-period', - str(save_period), '--device', device, - '--workers', str(num_workers_per_gpu) - ]) - - if save_best_only: - commands.append("--nosave") - - if gpu_count > 1 and sync_bn: - commands.append("--sync-bn") - - if args_options: - commands.extend(args_options.split()) - - logging.info(f'start training: {commands}') - - subprocess.run(commands, check=True) - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) - - # if task done, write 100% percent log - monitor.write_monitor_logger(percent=1.0) - - -def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: - # generate data.yaml for mining - out_dir = cfg.ymir.output.root_dir - convert_ymir_to_yolov5(cfg) - logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) - gpu_id: str = str(cfg.param.get('gpu_id', '0')) - gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 - - mining_algorithm = cfg.param.get('mining_algorithm', 'aldd') - support_mining_algorithms = ['aldd', 'cald', 'random', 'entropy'] - if mining_algorithm not in support_mining_algorithms: - raise Exception(f'unknown mining algorithm {mining_algorithm}, not in {support_mining_algorithms}') - - if gpu_count <= 1: - command = f'python3 mining/ymir_mining_{mining_algorithm}.py' - else: - port = find_free_port() - command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} 
mining/ymir_mining_{mining_algorithm}.py' # noqa - - logging.info(f'mining: {command}') - subprocess.run(command.split(), check=True) - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) - - -def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: - # generate data.yaml for infer - out_dir = cfg.ymir.output.root_dir - convert_ymir_to_yolov5(cfg) - logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) - - gpu_id: str = str(cfg.param.get('gpu_id', '0')) - gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 - - if gpu_count <= 1: - N = dr.items_count(env.DatasetType.CANDIDATE) - infer_result = dict() - model = YmirYolov5(cfg) - idx = -1 - - monitor_gap = max(1, N // 100) - for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): - img = cv2.imread(asset_path) - result = model.infer(img) - infer_result[asset_path] = result - idx += 1 - - if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, task_idx=task_idx, task_num=task_num) - monitor.write_monitor_logger(percent=percent) - - rw.write_infer_result(infer_result=infer_result) - else: - port = find_free_port() - command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_infer.py' # noqa - - logging.info(f'infer: {command}') - subprocess.run(command.split(), check=True) - - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) - - -if __name__ == '__main__': - logging.basicConfig(stream=sys.stdout, - format='%(levelname)-8s: [%(asctime)s] %(message)s', - datefmt='%Y%m%d-%H:%M:%S', - level=logging.INFO) - - cfg = get_merged_config() - os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') - - # activation: relu - activation: str = cfg.param.get('activation', '') - if activation: - os.environ.setdefault('ACTIVATION', activation) - sys.exit(start(cfg)) diff --git a/det-yolov5-tmi/training-template.yaml b/det-yolov5-tmi/training-template.yaml deleted file mode 100644 index 1cc4752..0000000 --- a/det-yolov5-tmi/training-template.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# training template for your executor app -# after build image, it should at /img-man/training-template.yaml -# key: gpu_id, task_id, pretrained_model_params, class_names should be preserved - -# gpu_id: '0' -# task_id: 'default-training-task' -# pretrained_model_params: [] -# class_names: [] - -shm_size: '128G' -export_format: 'ark:raw' -model: 'yolov5s' -batch_size_per_gpu: 16 -num_workers_per_gpu: 4 -epochs: 100 -img_size: 640 -opset: 11 -args_options: '--exist-ok' -save_best_only: True # save the best weight file only -save_period: 10 -sync_bn: False # work for multi-gpu only -ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern diff --git a/det-yolov5-tmi/utils/ymir_yolov5.py b/det-yolov5-tmi/utils/ymir_yolov5.py deleted file mode 100644 index c463ded..0000000 --- a/det-yolov5-tmi/utils/ymir_yolov5.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -utils function for ymir and yolov5 -""" -import os.path as osp -import shutil -from typing import Any, List - -import numpy as np -import torch -import yaml -from easydict import EasyDict as edict -from models.common import DetectMultiBackend -from nptyping 
import NDArray, Shape, UInt8 -from utils.augmentations import letterbox -from utils.general import check_img_size, non_max_suppression, scale_coords -from utils.torch_utils import select_device -from ymir_exc import monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process - -BBOX = NDArray[Shape['*,4'], Any] -CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] - - -def get_weight_file(cfg: edict) -> str: - """ - return the weight file path by priority - find weight file in cfg.param.model_params_path or cfg.param.model_params_path - """ - weight_files = get_weight_files(cfg, suffix=('.pt')) - # choose weight file by priority, best.pt > xxx.pt - for p in weight_files: - if p.endswith('best.pt'): - return p - - if len(weight_files) > 0: - return max(weight_files, key=osp.getctime) - - return "" - - -class YmirYolov5(torch.nn.Module): - """ - used for mining and inference to init detector and predict. - """ - def __init__(self, cfg: edict, task='infer'): - super().__init__() - self.cfg = cfg - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # multiple task, run mining first, infer later - if task == 'infer': - self.task_idx = 1 - elif task == 'mining': - self.task_idx = 0 - else: - raise Exception(f'unknown task {task}') - - self.task_num = 2 - else: - self.task_idx = 0 - self.task_num = 1 - - self.gpu_id: str = str(cfg.param.get('gpu_id', '0')) - device = select_device(self.gpu_id) # will set CUDA_VISIBLE_DEVICES=self.gpu_id - self.gpu_count: int = len(self.gpu_id.split(',')) if self.gpu_id else 0 - self.batch_size_per_gpu: int = int(cfg.param.get('batch_size_per_gpu', 4)) - self.num_workers_per_gpu: int = int(cfg.param.get('num_workers_per_gpu', 4)) - self.pin_memory: bool = get_bool(cfg, 'pin_memory', False) - self.batch_size: int = self.batch_size_per_gpu * self.gpu_count - self.model = self.init_detector(device) - self.model.eval() - self.device = device - self.class_names: List[str] = cfg.param.class_names - self.stride = self.model.stride - self.conf_thres: float = float(cfg.param.conf_thres) - self.iou_thres: float = float(cfg.param.iou_thres) - - img_size = int(cfg.param.img_size) - imgsz = [img_size, img_size] - imgsz = check_img_size(imgsz, s=self.stride) - - self.model.warmup(imgsz=(1, 3, *imgsz), half=False) # warmup - self.img_size: List[int] = imgsz - - def extract_feats(self, x): - """ - return the feature maps before sigmoid for mining - """ - return self.model.model(x)[1] - - def forward(self, x, nms=False): - pred = self.model(x) - if not nms: - return pred - - pred = non_max_suppression(pred, - conf_thres=self.conf_thres, - iou_thres=self.iou_thres, - classes=None, # not filter class_idx - agnostic=False, - max_det=100) - return pred - - def init_detector(self, device: torch.device) -> DetectMultiBackend: - weights = get_weight_file(self.cfg) - - if not weights: - raise Exception("no weights file specified!") - - data_yaml = osp.join(self.cfg.ymir.output.root_dir, 'data.yaml') - model = DetectMultiBackend( - weights=weights, - device=device, - dnn=False, # not use opencv dnn for onnx inference - data=data_yaml) # dataset.yaml path - - return model - - def predict(self, img: CV_IMAGE) -> NDArray: - """ - predict single image and return bbox information - img: opencv BGR, uint8 format - """ - # preprocess: padded resize - img1 = letterbox(img, self.img_size, stride=self.stride, auto=True)[0] - - # preprocess: convert data format - img1 = img1.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img1 = 
np.ascontiguousarray(img1) - img1 = torch.from_numpy(img1).to(self.device) - - img1 = img1 / 255 # 0 - 255 to 0.0 - 1.0 - img1.unsqueeze_(dim=0) # expand for batch dim - pred = self.forward(img1, nms=True) - - result = [] - for det in pred: - if len(det): - # Rescale boxes from img_size to img size - det[:, :4] = scale_coords(img1.shape[2:], det[:, :4], img.shape).round() - result.append(det) - - # xyxy, conf, cls - if len(result) > 0: - tensor_result = torch.cat(result, dim=0) - numpy_result = tensor_result.data.cpu().numpy() - else: - numpy_result = np.zeros(shape=(0, 6), dtype=np.float32) - - return numpy_result - - def infer(self, img: CV_IMAGE) -> List[rw.Annotation]: - anns = [] - result = self.predict(img) - - for i in range(result.shape[0]): - xmin, ymin, xmax, ymax, conf, cls = result[i, :6].tolist() - ann = rw.Annotation(class_name=self.class_names[int(cls)], - score=conf, - box=rw.Box(x=int(xmin), y=int(ymin), w=int(xmax - xmin), h=int(ymax - ymin))) - - anns.append(ann) - - return anns - - def write_monitor_logger(self, stage: YmirStage, p: float): - monitor.write_monitor_logger( - percent=get_ymir_process(stage=stage, p=p, task_idx=self.task_idx, task_num=self.task_num)) - - -def convert_ymir_to_yolov5(cfg: edict, out_dir: str = None): - """ - convert ymir format dataset to yolov5 format - generate data.yaml for training/mining/infer - """ - - out_dir = out_dir or cfg.ymir.output.root_dir - data = dict(path=out_dir, nc=len(cfg.param.class_names), names=cfg.param.class_names) - for split, prefix in zip(['train', 'val', 'test'], ['training', 'val', 'candidate']): - src_file = getattr(cfg.ymir.input, f'{prefix}_index_file') - if osp.exists(src_file): - shutil.copy(src_file, f'{out_dir}/{split}.tsv') - - data[split] = f'{split}.tsv' - - with open(osp.join(out_dir, 'data.yaml'), 'w') as fw: - fw.write(yaml.safe_dump(data)) From 8980e0a9586cd407511ecf31d24c906807b2ba79 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 11:40:17 +0800 Subject: [PATCH 149/204] merge single-gpu and multiple-gpu infer --- det-yolov5-tmi/mypy.ini | 1 - det-yolov5-tmi/ymir/README.md | 4 +-- det-yolov5-tmi/ymir/mining/data_augment.py | 3 +- det-yolov5-tmi/ymir/mining/util.py | 4 +-- det-yolov5-tmi/ymir/mining/ymir_infer.py | 11 ++++--- .../ymir/mining/ymir_mining_aldd.py | 4 +-- .../ymir/mining/ymir_mining_cald.py | 11 ++++--- .../ymir/mining/ymir_mining_entropy.py | 9 ++++-- .../ymir/mining/ymir_mining_random.py | 7 +++-- det-yolov5-tmi/ymir/start.py | 30 +++++-------------- 10 files changed, 39 insertions(+), 45 deletions(-) diff --git a/det-yolov5-tmi/mypy.ini b/det-yolov5-tmi/mypy.ini index bb96738..6a356a3 100644 --- a/det-yolov5-tmi/mypy.ini +++ b/det-yolov5-tmi/mypy.ini @@ -2,7 +2,6 @@ ignore_missing_imports = True disallow_untyped_defs = False exclude = [utils/general.py, models/*.py, utils/*.py] -files = mining/*.py, utils/ymir_yolov5.py, start.py, train.py [mypy-torch.*] ignore_errors = True diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md index 6bf9151..1936a93 100644 --- a/det-yolov5-tmi/ymir/README.md +++ b/det-yolov5-tmi/ymir/README.md @@ -9,9 +9,9 @@ docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERV ## main change log -- add `start.py` and `utils/ymir_yolov5.py` for train/infer/mining +- add `start.py` and `ymir/ymir_yolov5.py` for train/infer/mining -- add `utils/ymir_yolov5.py` for useful functions +- add `ymir/ymir_yolov5.py` for useful functions - `get_merged_config()` add ymir path config `cfg.yaml` and
hyper-parameter `cfg.param` diff --git a/det-yolov5-tmi/ymir/mining/data_augment.py b/det-yolov5-tmi/ymir/mining/data_augment.py index cfafaa7..d88a86d 100644 --- a/det-yolov5-tmi/ymir/mining/data_augment.py +++ b/det-yolov5-tmi/ymir/mining/data_augment.py @@ -8,8 +8,7 @@ import cv2 import numpy as np from nptyping import NDArray - -from utils.ymir_yolov5 import BBOX, CV_IMAGE +from ymir.ymir_yolov5 import BBOX, CV_IMAGE def intersect(boxes1: BBOX, boxes2: BBOX) -> NDArray: diff --git a/det-yolov5-tmi/ymir/mining/util.py b/det-yolov5-tmi/ymir/mining/util.py index c69343c..0e9e3f5 100644 --- a/det-yolov5-tmi/ymir/mining/util.py +++ b/det-yolov5-tmi/ymir/mining/util.py @@ -19,12 +19,12 @@ import cv2 import numpy as np import torch.utils.data as td -from mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate from nptyping import NDArray from scipy.stats import entropy from torch.utils.data._utils.collate import default_collate from utils.augmentations import letterbox -from utils.ymir_yolov5 import BBOX +from ymir.mining.data_augment import cutout, horizontal_flip, intersect, resize, rotate +from ymir.ymir_yolov5 import BBOX LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) diff --git a/det-yolov5-tmi/ymir/mining/ymir_infer.py b/det-yolov5-tmi/ymir/mining/ymir_infer.py index ad1e0d2..bd1c237 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_infer.py +++ b/det-yolov5-tmi/ymir/mining/ymir_infer.py @@ -12,10 +12,10 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file from tqdm import tqdm from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 +from ymir.mining.util import YmirDataset, load_image_file +from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config @@ -44,7 +44,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): max_barrier_times = len(images) // max(1, WORLD_SIZE) // batch_size_per_gpu # origin dataset - images_rank = images[RANK::WORLD_SIZE] + if RANK != -1: + images_rank = images[RANK::WORLD_SIZE] + else: + images_rank = images origin_dataset = YmirDataset(images_rank, load_fn=load_fn) origin_dataset_loader = td.DataLoader(origin_dataset, batch_size=batch_size_per_gpu, @@ -80,7 +83,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): result_per_image.append(det) results.append(dict(image_file=image_file, result=result_per_image)) - torch.save(results, f'/out/infer_results_{RANK}.pt') + torch.save(results, f'/out/infer_results_{max(0,RANK)}.pt') def main() -> int: diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py index 8d6a27c..0a90e3f 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py @@ -17,9 +17,9 @@ import torch.nn.functional as F import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 +from ymir.mining.util import YmirDataset, load_image_file +from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py index b4c6147..4a07d32 100644 --- 
a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py @@ -14,11 +14,11 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, - update_consistency) from tqdm import tqdm from utils.general import scale_coords -from utils.ymir_yolov5 import YmirYolov5 +from ymir.mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann, + update_consistency) +from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config @@ -47,7 +47,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu # origin dataset - images_rank = images[RANK::WORLD_SIZE] + if RANK != -1: + images_rank = images[RANK::WORLD_SIZE] + else: + images_rank = images origin_dataset = YmirDataset(images_rank, load_fn=load_fn) origin_dataset_loader = td.DataLoader(origin_dataset, batch_size=batch_size_per_gpu, diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py index df5a1ff..86136e1 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py @@ -14,9 +14,9 @@ import torch.distributed as dist import torch.utils.data as td from easydict import EasyDict as edict -from mining.util import YmirDataset, load_image_file from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 +from ymir.mining.util import YmirDataset, load_image_file +from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config @@ -45,7 +45,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): max_barrier_times = (len(images) // max(1, WORLD_SIZE)) // batch_size_per_gpu # origin dataset - images_rank = images[RANK::WORLD_SIZE] + if RANK != -1: + images_rank = images[RANK::WORLD_SIZE] + else: + images_rank = images origin_dataset = YmirDataset(images_rank, load_fn=load_fn) origin_dataset_loader = td.DataLoader(origin_dataset, batch_size=batch_size_per_gpu, diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py index 30fb099..eeb08cf 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py @@ -13,7 +13,7 @@ import torch.distributed as dist from easydict import EasyDict as edict from tqdm import tqdm -from utils.ymir_yolov5 import YmirYolov5 +from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config @@ -31,7 +31,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): with open(ymir_cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] - images_rank = images[RANK::WORLD_SIZE] + if RANK != -1: + images_rank = images[RANK::WORLD_SIZE] + else: + images_rank = images mining_results = dict() dataset_size = len(images_rank) pbar = tqdm(images_rank) if RANK == 0 else images_rank diff --git a/det-yolov5-tmi/ymir/start.py b/det-yolov5-tmi/ymir/start.py index 6c82844..11eece0 100644 --- a/det-yolov5-tmi/ymir/start.py +++ b/det-yolov5-tmi/ymir/start.py @@ -6,7 +6,7 @@ import cv2 from easydict import EasyDict as edict from models.experimental import attempt_download 
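The `_run_mining` hunks just below route both the single-GPU and the multi-GPU case through the same `ymir/mining/ymir_mining_{mining_algorithm}.py` scripts. As a rough sketch of the dispatch logic those hunks implement (a simplification, not the exact executor code; `find_free_port` is assumed to behave like the `ymir_exc.util` helper of the same name):

```python
import socket
import subprocess


def find_free_port() -> int:
    # Assumption: mirrors ymir_exc.util.find_free_port by letting the OS pick a port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('', 0))
        return s.getsockname()[1]


def launch_mining(mining_algorithm: str, gpu_count: int) -> None:
    # One plain process on <=1 GPU; otherwise one DDP worker per GPU
    # spawned through torch.distributed.launch on a free master port.
    script = f'ymir/mining/ymir_mining_{mining_algorithm}.py'
    if gpu_count <= 1:
        command = f'python3 {script}'
    else:
        port = find_free_port()
        command = (f'python3 -m torch.distributed.launch '
                   f'--nproc_per_node {gpu_count} --master_port {port} {script}')
    subprocess.run(command.split(), check=True)
```

Either way the same script runs; the DDP environment variables (`RANK`, `WORLD_SIZE`, `LOCAL_RANK`) are what make it shard the candidate images.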
-from utils.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file +from ymir.ymir_yolov5 import YmirYolov5, convert_ymir_to_yolov5, get_weight_file from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw @@ -124,10 +124,10 @@ def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: raise Exception(f'unknown mining algorithm {mining_algorithm}, not in {support_mining_algorithms}') if gpu_count <= 1: - command = f'python3 mining/ymir_mining_{mining_algorithm}.py' + command = f'python3 ymir/mining/ymir_mining_{mining_algorithm}.py' else: port = find_free_port() - command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_mining_{mining_algorithm}.py' # noqa + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} ymir/mining/ymir_mining_{mining_algorithm}.py' # noqa logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) @@ -147,29 +147,13 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 if gpu_count <= 1: - N = dr.items_count(env.DatasetType.CANDIDATE) - infer_result = dict() - model = YmirYolov5(cfg) - idx = -1 - - monitor_gap = max(1, N // 100) - for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): - img = cv2.imread(asset_path) - result = model.infer(img) - infer_result[asset_path] = result - idx += 1 - - if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=idx / N, task_idx=task_idx, task_num=task_num) - monitor.write_monitor_logger(percent=percent) - - rw.write_infer_result(infer_result=infer_result) + command = 'python3 ymir/mining/ymir_infer.py' else: port = find_free_port() - command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} mining/ymir_infer.py' # noqa + command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} ymir/mining/ymir_infer.py' # noqa - logging.info(f'infer: {command}') - subprocess.run(command.split(), check=True) + logging.info(f'infer: {command}') + subprocess.run(command.split(), check=True) monitor.write_monitor_logger( percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) From 180a8fefe3f599c9436395e532b51b27130b2339 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 14:36:07 +0800 Subject: [PATCH 150/204] update monitor process --- .../mmdet/core/evaluation/eval_hooks.py | 20 +- det-mmdetection-tmi/ymir_infer.py | 23 +-- det-mmdetection-tmi/ymir_mining_cald.py | 23 +-- det-mmdetection-tmi/ymir_train.py | 5 +- det-yolov5-tmi/train.py | 173 ++++++++++-------- det-yolov5-tmi/ymir/docker/cuda102.dockerfile | 4 +- det-yolov5-tmi/ymir/docker/cuda111.dockerfile | 4 +- det-yolov5-tmi/ymir/mining/ymir_infer.py | 11 +- .../ymir/mining/ymir_mining_aldd.py | 13 +- .../ymir/mining/ymir_mining_cald.py | 12 +- .../ymir/mining/ymir_mining_entropy.py | 8 +- .../ymir/mining/ymir_mining_random.py | 6 +- det-yolov5-tmi/ymir/start.py | 43 ++--- det-yolov5-tmi/ymir/ymir_yolov5.py | 35 +--- 14 files changed, 169 insertions(+), 211 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index 81a36bb..735049d 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ 
b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -9,7 +9,7 @@ from mmdet.utils.util_ymir import write_ymir_training_result from torch.nn.modules.batchnorm import _BatchNorm from ymir_exc import monitor -from ymir_exc.util import YmirStage, get_ymir_process +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process def _calc_dynamic_intervals(start_interval, dynamic_interval_list): @@ -28,6 +28,7 @@ class EvalHook(BaseEvalHook): def __init__(self, *args, dynamic_intervals=None, **kwargs): super(EvalHook, self).__init__(*args, **kwargs) + self.ymir_cfg = get_merged_config() self.use_dynamic_intervals = dynamic_intervals is not None if self.use_dynamic_intervals: @@ -51,9 +52,7 @@ def after_train_epoch(self, runner): if self.by_epoch: monitor_interval = max(1, runner.max_epochs // 1000) if runner.epoch % monitor_interval == 0: - percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.epoch / runner.max_epochs) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.epoch / runner.max_epochs, stage=YmirStage.TASK) super().after_train_epoch(runner) def before_train_iter(self, runner): @@ -64,9 +63,7 @@ def after_train_iter(self, runner): if not self.by_epoch: monitor_interval = max(1, runner.max_iters // 1000) if runner.iter % monitor_interval == 0: - percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.iter / runner.max_iters) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.iter / runner.max_iters, stage=YmirStage.TASK) super().after_train_iter(runner) def _do_evaluate(self, runner): @@ -98,6 +95,7 @@ class DistEvalHook(BaseDistEvalHook): def __init__(self, *args, dynamic_intervals=None, **kwargs): super(DistEvalHook, self).__init__(*args, **kwargs) + self.ymir_cfg = get_merged_config() self.use_dynamic_intervals = dynamic_intervals is not None if self.use_dynamic_intervals: @@ -121,9 +119,7 @@ def after_train_epoch(self, runner): if self.by_epoch and runner.rank == 0: monitor_interval = max(1, runner.max_epochs // 1000) if runner.epoch % monitor_interval == 0: - percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.epoch / runner.max_epochs) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.epoch / runner.max_epochs, stage=YmirStage.TASK) super().after_train_epoch(runner) def before_train_iter(self, runner): @@ -134,9 +130,7 @@ def after_train_iter(self, runner): if not self.by_epoch and runner.rank == 0: monitor_interval = max(1, runner.max_iters // 1000) if runner.iter % monitor_interval == 0: - percent = get_ymir_process( - stage=YmirStage.TASK, p=runner.iter / runner.max_iters) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.iter / runner.max_iters, stage=YmirStage.TASK) super().after_train_iter(runner) def _do_evaluate(self, runner): diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 939e5bf..bda229e 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -14,7 +14,7 @@ from ymir_exc import dataset_reader as dr from ymir_exc import env, monitor from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process +from ymir_exc.util import YmirStage, get_merged_config,
write_ymir_monitor_process def parse_option(cfg_options: str) -> dict: @@ -73,18 +73,6 @@ class YmirModel: def __init__(self, cfg: edict): self.cfg = cfg - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # mining_task_idx = 0 - infer_task_idx = 1 - task_num = 2 - else: - # mining_task_idx = 0 - infer_task_idx = 0 - task_num = 1 - - self.task_idx = infer_task_idx - self.task_num = task_num - # Specify the path to model config and checkpoint file config_file = get_config_file(cfg) checkpoint_file = get_best_weight_file(cfg) @@ -121,15 +109,10 @@ def main(): idx += 1 if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, - p=idx / N, - task_idx=model.task_idx, - task_num=model.task_num) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=idx / N, stage = YmirStage.TASK) rw.write_infer_result(infer_result=infer_result) - percent = get_ymir_process(stage=YmirStage.POSTPROCESS, p=1, task_idx=model.task_idx, task_num=model.task_num) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=1.0, stage=YmirStage.POSTPROCESS) return 0 diff --git a/det-mmdetection-tmi/ymir_mining_cald.py b/det-mmdetection-tmi/ymir_mining_cald.py index fe437ff..65e6ff4 100644 --- a/det-mmdetection-tmi/ymir_mining_cald.py +++ b/det-mmdetection-tmi/ymir_mining_cald.py @@ -20,7 +20,7 @@ from tqdm import tqdm from ymir_exc import monitor from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process from ymir_infer import YmirModel LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -254,16 +254,6 @@ class YmirMining(YmirModel): def __init__(self, cfg: edict): super().__init__(cfg) - if cfg.ymir.run_mining and cfg.ymir.run_infer: - mining_task_idx = 0 - # infer_task_idx = 1 - task_num = 2 - else: - mining_task_idx = 0 - # infer_task_idx = 0 - task_num = 1 - self.task_idx = mining_task_idx - self.task_num = task_num def mining(self): with open(self.cfg.ymir.input.candidate_index_file, 'r') as f: @@ -287,11 +277,8 @@ def mining(self): mining_result = [] for idx, asset_path in enumerate(tbar): if idx % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, - p=idx / N, - task_idx=self.task_idx, - task_num=self.task_num) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(self.cfg, task='mining', naive_stage_percent=idx / N, stage=YmirStage.TASK) + # batch-level sync, avoid 30min time-out error if WORLD_SIZE > 1 and idx < max_barrier_times: dist.barrier() @@ -401,9 +388,7 @@ def main(): if RANK in [0, -1]: rw.write_mining_result(mining_result=mining_result) - - percent = get_ymir_process(stage=YmirStage.POSTPROCESS, p=1, task_idx=miner.task_idx, task_num=miner.task_num) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(cfg, task='mining', naive_stage_percent=1, stage=YmirStage.POSTPROCESS) return 0 diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py index 06ed4dd..b71595f 100644 --- a/det-mmdetection-tmi/ymir_train.py +++ b/det-mmdetection-tmi/ymir_train.py @@ -7,7 +7,7 @@ from easydict import EasyDict as edict from mmdet.utils.util_ymir import get_best_weight_file, write_ymir_training_result from ymir_exc import monitor -from ymir_exc.util import YmirStage, find_free_port, get_merged_config, 
get_ymir_process +from ymir_exc.util import YmirStage, find_free_port, get_merged_config, write_ymir_monitor_process def main(cfg: edict) -> int: @@ -41,8 +41,7 @@ def main(cfg: edict) -> int: else: logging.warning('no weight file used for training!') - monitor.write_monitor_logger( - percent=get_ymir_process(YmirStage.PREPROCESS, p=0.2)) + write_ymir_monitor_process(cfg, task='training', naive_stage_percent=0.2, stage=YmirStage.PREPROCESS) work_dir = cfg.ymir.output.models_dir if num_gpus == 0: diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py index 54fd2e8..e8e794a 100644 --- a/det-yolov5-tmi/train.py +++ b/det-yolov5-tmi/train.py @@ -16,12 +16,12 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy from datetime import datetime from pathlib import Path -import subprocess import numpy as np import torch @@ -40,8 +40,6 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from ymir_exc.util import YmirStage, get_merged_config, get_ymir_process, write_ymir_training_result - import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model @@ -60,17 +58,18 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process, write_ymir_training_result LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def train(hyp, # path/to/hyp.yaml or hyp dictionary - opt, - device, - callbacks - ): +def train( + hyp, # path/to/hyp.yaml or hyp dictionary + opt, + device, + callbacks): save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze @@ -186,7 +185,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if opt.cos_lr: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] else: - def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + + def lf(x): + return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA @@ -227,20 +229,38 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear LOGGER.info('Using SyncBatchNorm()') # Trainloader - train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, rank=LOCAL_RANK, workers=workers, - image_weights=opt.image_weights, quad=opt.quad, - prefix=colorstr('train: '), shuffle=True) + train_loader, dataset = create_dataloader(train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True) mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class nb = len(train_loader) # number of batches
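An aside on the `lf` lambda reformatted in the train.py hunk above: the change is purely cosmetic, and the linear schedule it encodes is easy to sanity-check in isolation. A minimal standalone check, with illustrative values standing in for `epochs` and `hyp['lrf']`:

```python
epochs, lrf = 100, 0.01  # illustrative values; lrf stands in for hyp['lrf']


def lf(x: float) -> float:
    return (1 - x / epochs) * (1.0 - lrf) + lrf  # linear decay of the LR multiplier


assert abs(lf(0) - 1.0) < 1e-9       # epoch 0: full base learning rate
assert abs(lf(epochs) - lrf) < 1e-9  # end of schedule: lr0 * lrf
print(lf(50))                        # halfway: 0.505
```

The multiplier falls linearly from 1.0 to `lrf`, so the final learning rate is `lr0 * lrf`.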
assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' # Process 0 if RANK in [-1, 0]: - val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=None if noval else opt.cache, - rect=True, rank=-1, workers=workers * 2, pad=0.5, + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, prefix=colorstr('val: '))[0] if not resume: @@ -269,7 +289,7 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) hyp['box'] *= 3 / nl # scale to layers hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['obj'] *= (imgsz / 640)**2 * 3 / nl # scale to image size and layers hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model @@ -298,12 +318,11 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear # ymir monitor if epoch % monitor_gap == 0: - percent = get_ymir_process(stage=YmirStage.TASK, p=(epoch - start_epoch + 1) / (epochs - start_epoch + 1)) - monitor.write_monitor_logger(percent=percent) + write_ymir_monitor_process(ymir_cfg, task='training', naive_stage_percent=(epoch - start_epoch + 1) / (epochs - start_epoch + 1), stage=YmirStage.TASK) # Update image weights (optional, single-GPU only) if opt.image_weights: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + cw = model.class_weights.cpu().numpy() * (1 - maps)**2 / nc # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx @@ -367,8 +386,8 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( - f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) if callbacks.stop_training: return @@ -404,14 +423,16 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear # Save model if (not nosave) or (best_fitness == fi) or (final_epoch and not evolve): # if save - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'date': datetime.now().isoformat()} + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'date': datetime.now().isoformat() + } # Save last, best and delete 
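Another line kept as context in the hunks above deserves a gloss: `mloss = (mloss * i + loss_items) / (i + 1)` is an incremental mean, so after any number of batches `mloss` equals the plain average of every `loss_items` tensor seen so far. A tiny self-contained check with made-up loss values:

```python
import torch

loss_history = [torch.tensor([1.0]), torch.tensor([2.0]), torch.tensor([4.0])]  # made-up values
mloss = torch.zeros(1)
for i, loss_items in enumerate(loss_history):
    mloss = (mloss * i + loss_items) / (i + 1)  # same update as in the hunk above

assert torch.allclose(mloss, sum(loss_history) / len(loss_history))  # running mean == 7/3
```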
torch.save(ckpt, last) @@ -448,19 +469,20 @@ def lf(x): return (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear strip_optimizer(f) # strip optimizers if f is best: LOGGER.info(f'\nValidating {f}...') - results, _, _ = val.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=True, - callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots + results, _, _ = val.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=True, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) @@ -539,7 +561,8 @@ def main(opt, callbacks=Callbacks()): ymir_cfg = get_merged_config() # Resume if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run - ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run(ymir_cfg.ymir.input.root_dir) # specified or most recent path + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run( + ymir_cfg.ymir.input.root_dir) # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' opt_file = Path(ckpt).parent / 'opt.yaml' @@ -580,35 +603,37 @@ def main(opt, callbacks=Callbacks()): # Evolve hyperparameters (optional) else: # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum - 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr - 'box': (1, 0.02, 0.2), # box loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 'scale': (1, 0.0, 0.9), # image scale (+/- gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mosaic': (1, 0.0, 1.0), # image mixup 
(probability) - 'mixup': (1, 0.0, 1.0), # image mixup (probability) - 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0) + } # segment copy-paste (probability) with open(opt.hyp, errors='ignore') as f: hyp = yaml.safe_load(f) # load hyps dict diff --git a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile index 0014b60..d50072d 100644 --- a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile +++ b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile @@ -24,7 +24,7 @@ RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" # Copy file from host to docker and install requirements COPY . /app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ \ +RUN mkdir /img-man && mv /app/ymir/img-man/*-template.yaml /img-man/ \ && pip install -r /app/requirements.txt # Download pretrained weight and font file @@ -36,5 +36,5 @@ RUN cd /app && bash data/scripts/download_weights.sh \ ENV PYTHONPATH=. WORKDIR /app -RUN echo "python3 /app/start.py" > /usr/bin/start.sh +RUN echo "python3 /app/ymir/start.py" > /usr/bin/start.sh CMD bash /usr/bin/start.sh diff --git a/det-yolov5-tmi/ymir/docker/cuda111.dockerfile b/det-yolov5-tmi/ymir/docker/cuda111.dockerfile index 84427a8..f7d8538 100644 --- a/det-yolov5-tmi/ymir/docker/cuda111.dockerfile +++ b/det-yolov5-tmi/ymir/docker/cuda111.dockerfile @@ -28,7 +28,7 @@ RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" # Copy file from host to docker and install requirements COPY . 
/app -RUN mkdir /img-man && mv /app/*-template.yaml /img-man/ +RUN mkdir /img-man && mv /app/ymir/img-man/*-template.yaml /img-man/ # Download pretrained weight and font file RUN cd /app && bash data/scripts/download_weights.sh \ @@ -39,5 +39,5 @@ RUN cd /app && bash data/scripts/download_weights.sh \ ENV PYTHONPATH=. WORKDIR /app -RUN echo "python3 /app/start.py" > /usr/bin/start.sh +RUN echo "python3 /app/ymir/start.py" > /usr/bin/start.sh CMD bash /usr/bin/start.sh diff --git a/det-yolov5-tmi/ymir/mining/ymir_infer.py b/det-yolov5-tmi/ymir/mining/ymir_infer.py index bd1c237..92fa935 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_infer.py +++ b/det-yolov5-tmi/ymir/mining/ymir_infer.py @@ -17,7 +17,7 @@ from ymir.mining.util import YmirDataset, load_image_file from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -63,14 +63,14 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: + if WORLD_SIZE > 1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) if idx % monitor_gap == 0: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + write_ymir_monitor_process(ymir_cfg, task='infer', naive_stage_percent=idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) preprocess_image_shape = batch['image'].shape[2:] for idx, det in enumerate(pred): # per image @@ -88,7 +88,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): def main() -> int: ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='infer') + ymir_yolov5 = YmirYolov5(ymir_cfg) if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' @@ -98,7 +98,8 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the infer result - dist.barrier() + if WORLD_SIZE > 1: + dist.barrier() if RANK in [0, -1]: results = [] diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py index 0a90e3f..219790d 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py @@ -21,7 +21,7 @@ from ymir.mining.util import YmirDataset, load_image_file from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -161,7 +161,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): miner = ALDD(ymir_cfg) for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: + if WORLD_SIZE > 1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): @@ -172,7 +172,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): mining_results[each_imgname] = each_score if RANK in [-1, 0]: - 
ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + write_ymir_monitor_process(ymir_cfg, + task='mining', + naive_stage_percent=idx * batch_size_per_gpu / dataset_size, + stage=YmirStage.TASK) torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') @@ -180,7 +183,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): def main() -> int: ymir_cfg = get_merged_config() # note select_device(gpu_id) will set os.environ['CUDA_VISIBLE_DEVICES'] to gpu_id - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + ymir_yolov5 = YmirYolov5(ymir_cfg) if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' @@ -190,7 +193,7 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - if LOCAL_RANK != -1: + if WORLD_SIZE > 1: dist.barrier() if RANK in [0, -1]: diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py index 4a07d32..a357e59 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py @@ -20,7 +20,7 @@ update_consistency) from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -67,14 +67,14 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: + if WORLD_SIZE > 1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=0.3 * idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) preprocess_image_shape = batch['image'].shape[2:] for inner_idx, det in enumerate(pred): # per image result_per_image = [] @@ -108,7 +108,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader for idx, batch in enumerate(pbar): if idx % monitor_gap == 0 and RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=0.3 + 0.7 * idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) batch_consistency = [0.0 for _ in range(len(batch['image_file']))] aug_keys = ['flip', 'cutout', 'rotate', 'resize'] @@ -163,7 +163,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): def main() -> int: ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + ymir_yolov5 = YmirYolov5(ymir_cfg) if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' @@ -173,7 +173,7 @@ def main() -> int: run(ymir_cfg, ymir_yolov5) # wait all process to save the mining result - if LOCAL_RANK != -1: + if WORLD_SIZE > 1: dist.barrier() if RANK in [0, -1]: diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py 
b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py index 86136e1..6457c9a 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py @@ -18,7 +18,7 @@ from ymir.mining.util import YmirDataset, load_image_file from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -63,14 +63,14 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error - if LOCAL_RANK != -1 and idx < max_barrier_times: + if WORLD_SIZE > 1 and idx < max_barrier_times: dist.barrier() with torch.no_grad(): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=False) if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx * batch_size_per_gpu / dataset_size) + write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) for inner_idx, det in enumerate(pred): # per image image_file = batch['image_file'][inner_idx] if len(det): @@ -85,7 +85,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): def main() -> int: ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + ymir_yolov5 = YmirYolov5(ymir_cfg) if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py index eeb08cf..2edc598 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py @@ -15,7 +15,7 @@ from tqdm import tqdm from ymir.ymir_yolov5 import YmirYolov5 from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_merged_config +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -40,7 +40,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pbar = tqdm(images_rank) if RANK == 0 else images_rank for idx, image in enumerate(pbar): if RANK in [-1, 0]: - ymir_yolov5.write_monitor_logger(stage=YmirStage.TASK, p=idx / dataset_size) + write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=idx / dataset_size, stage=YmirStage.TASK) mining_results[image] = random.random() torch.save(mining_results, f'/out/mining_results_{max(0,RANK)}.pt') @@ -48,7 +48,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): def main() -> int: ymir_cfg = get_merged_config() - ymir_yolov5 = YmirYolov5(ymir_cfg, task='mining') + ymir_yolov5 = YmirYolov5(ymir_cfg) if LOCAL_RANK != -1: assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' diff --git a/det-yolov5-tmi/ymir/start.py b/det-yolov5-tmi/ymir/start.py index 11eece0..e0fea29 100644 --- a/det-yolov5-tmi/ymir/start.py +++ b/det-yolov5-tmi/ymir/start.py @@ -3,14 +3,11 @@ import subprocess import sys -import cv2 from easydict import EasyDict as edict from models.experimental import attempt_download -from ymir.ymir_yolov5 import YmirYolov5, 
convert_ymir_to_yolov5, get_weight_file -from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor -from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, find_free_port, get_bool, get_merged_config, get_ymir_process +from ymir.ymir_yolov5 import convert_ymir_to_yolov5, get_weight_file +from ymir_exc import monitor +from ymir_exc.util import YmirStage, find_free_port, get_bool, get_merged_config, write_ymir_monitor_process def start(cfg: edict) -> int: @@ -19,20 +16,10 @@ def start(cfg: edict) -> int: if cfg.ymir.run_training: _run_training(cfg) else: - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # multiple task, run mining first, infer later - mining_task_idx = 0 - infer_task_idx = 1 - task_num = 2 - else: - mining_task_idx = 0 - infer_task_idx = 0 - task_num = 1 - if cfg.ymir.run_mining: - _run_mining(cfg, mining_task_idx, task_num) + _run_mining(cfg) if cfg.ymir.run_infer: - _run_infer(cfg, infer_task_idx, task_num) + _run_infer(cfg) return 0 @@ -48,7 +35,7 @@ def _run_training(cfg: edict) -> None: out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0)) + write_ymir_monitor_process(cfg, task='training', naive_stage_percent=1.0, stage=YmirStage.PREPROCESS) # 2. training model epochs: int = int(cfg.param.epochs) @@ -102,19 +89,18 @@ def _run_training(cfg: edict) -> None: logging.info(f'start training: {commands}') subprocess.run(commands, check=True) - monitor.write_monitor_logger(percent=get_ymir_process(stage=YmirStage.TASK, p=1.0)) + write_ymir_monitor_process(cfg, task='training', naive_stage_percent=1.0, stage=YmirStage.TASK) # if task done, write 100% percent log monitor.write_monitor_logger(percent=1.0) -def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: +def _run_mining(cfg: edict) -> None: # generate data.yaml for mining out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + write_ymir_monitor_process(cfg, task='mining', naive_stage_percent=1.0, stage=YmirStage.PREPROCESS) gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 @@ -131,17 +117,15 @@ def _run_mining(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: logging.info(f'mining: {command}') subprocess.run(command.split(), check=True) - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + write_ymir_monitor_process(cfg, task='mining', naive_stage_percent=1.0, stage=YmirStage.POSTPROCESS) -def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: +def _run_infer(cfg: edict) -> None: # generate data.yaml for infer out_dir = cfg.ymir.output.root_dir convert_ymir_to_yolov5(cfg) logging.info(f'generate {out_dir}/data.yaml') - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.PREPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=1.0, stage=YmirStage.PREPROCESS) gpu_id: str = str(cfg.param.get('gpu_id', '0')) gpu_count: int = len(gpu_id.split(',')) if gpu_id else 0 @@ -155,8 +139,7 @@ def _run_infer(cfg: edict, task_idx: int = 0, task_num: int = 1) -> None: logging.info(f'infer: 
{command}') subprocess.run(command.split(), check=True) - monitor.write_monitor_logger( - percent=get_ymir_process(stage=YmirStage.POSTPROCESS, p=1.0, task_idx=task_idx, task_num=task_num)) + write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=1.0, stage=YmirStage.POSTPROCESS) if __name__ == '__main__': diff --git a/det-yolov5-tmi/ymir/ymir_yolov5.py b/det-yolov5-tmi/ymir/ymir_yolov5.py index c463ded..463db89 100644 --- a/det-yolov5-tmi/ymir/ymir_yolov5.py +++ b/det-yolov5-tmi/ymir/ymir_yolov5.py @@ -16,7 +16,7 @@ from utils.torch_utils import select_device from ymir_exc import monitor from ymir_exc import result_writer as rw -from ymir_exc.util import YmirStage, get_bool, get_weight_files, get_ymir_process +from ymir_exc.util import YmirStage, get_bool, get_weight_files, write_ymir_monitor_process BBOX = NDArray[Shape['*,4'], Any] CV_IMAGE = NDArray[Shape['*,*,3'], UInt8] @@ -43,22 +43,10 @@ class YmirYolov5(torch.nn.Module): """ used for mining and inference to init detector and predict. """ - def __init__(self, cfg: edict, task='infer'): + + def __init__(self, cfg: edict): super().__init__() self.cfg = cfg - if cfg.ymir.run_mining and cfg.ymir.run_infer: - # multiple task, run mining first, infer later - if task == 'infer': - self.task_idx = 1 - elif task == 'mining': - self.task_idx = 0 - else: - raise Exception(f'unknown task {task}') - - self.task_num = 2 - else: - self.task_idx = 0 - self.task_num = 1 self.gpu_id: str = str(cfg.param.get('gpu_id', '0')) device = select_device(self.gpu_id) # will set CUDA_VISIBLE_DEVICES=self.gpu_id @@ -93,12 +81,13 @@ def forward(self, x, nms=False): if not nms: return pred - pred = non_max_suppression(pred, - conf_thres=self.conf_thres, - iou_thres=self.iou_thres, - classes=None, # not filter class_idx - agnostic=False, - max_det=100) + pred = non_max_suppression( + pred, + conf_thres=self.conf_thres, + iou_thres=self.iou_thres, + classes=None, # not filter class_idx + agnostic=False, + max_det=100) return pred def init_detector(self, device: torch.device) -> DetectMultiBackend: @@ -163,10 +152,6 @@ def infer(self, img: CV_IMAGE) -> List[rw.Annotation]: return anns - def write_monitor_logger(self, stage: YmirStage, p: float): - monitor.write_monitor_logger( - percent=get_ymir_process(stage=stage, p=p, task_idx=self.task_idx, task_num=self.task_num)) - def convert_ymir_to_yolov5(cfg: edict, out_dir: str = None): """ From e5540762cc04a978d32b4d8f3c5e5c8ffff084df Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 20 Oct 2022 15:55:06 +0800 Subject: [PATCH 151/204] update ymir1.3.0 to ymir2.0.0 --- det-yolov5-tmi/ymir/mining/ymir_infer.py | 5 ++++- docs/mining-images-overview.md | 2 ++ docs/mining_score.png | Bin 0 -> 66640 bytes docs/official-docker-image.md | 13 +++++++++++++ 4 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 docs/mining_score.png diff --git a/det-yolov5-tmi/ymir/mining/ymir_infer.py b/det-yolov5-tmi/ymir/mining/ymir_infer.py index 92fa935..34cd978 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_infer.py +++ b/det-yolov5-tmi/ymir/mining/ymir_infer.py @@ -70,7 +70,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) if idx % monitor_gap == 0: - write_ymir_monitor_process(ymir_cfg, task='infer', naive_stage_percent=idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) + write_ymir_monitor_process(ymir_cfg, + task='infer', + naive_stage_percent=idx * batch_size_per_gpu / dataset_size, + stage=YmirStage.TASK) 
        preprocess_image_shape = batch['image'].shape[2:]
        for idx, det in enumerate(pred):  # per image
diff --git a/docs/mining-images-overview.md b/docs/mining-images-overview.md
index cec5f86..b376652 100644
--- a/docs/mining-images-overview.md
+++ b/docs/mining-images-overview.md
@@ -12,6 +12,8 @@
 view [ALBench: Active Learning Benchmark](https://github.com/modelai/ALBench) for detail
 
+![](./mining_score.png)
+
 ## reference
 
 - entropy: `Multi-class active learning for image classification. CVPR 2009`
diff --git a/docs/mining_score.png b/docs/mining_score.png
new file mode 100644
index 0000000000000000000000000000000000000000..7582616606ff9578549cfe72a8dbd4ed094b338e
Binary files /dev/null and b/docs/mining_score.png differ
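The mining overview doc above cites entropy-based active learning as one of the scoring strategies. As a rough illustration of what an entropy mining score computes — a minimal sketch, not the exact aggregation used by `ymir_mining_entropy.py` — higher scores mean the model is less certain about an image:

```python
import torch


def entropy_score(probs: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Mean Shannon entropy over detections: higher = less certain model.

    probs: class probabilities per detection, shape (num_boxes, num_classes).
    Illustrative only; the real mining script may aggregate differently.
    """
    p = probs.clamp_min(eps)
    per_box = -(p * p.log()).sum(dim=-1)  # entropy of each detection
    return per_box.mean()  # one mining score per image


# toy example: 5 detections over 80 classes
score = entropy_score(torch.softmax(torch.randn(5, 80), dim=-1))
print(float(score))
```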
zw%3m?_mhSB`z^x;jp_FTugub!B>sV{UDBAXY(?R8@Dr(P$go)yE8-3BS0?JPV|7XDW298XJ?CxNmY+ zJLWo9KbJuA_VeeIJ2bObR#njW5$myhj$UQ3~Vght_mJcamY4oiLrGTYJDw zdi?ou8*;YwLP5y;k51JB2syo}##ZE#q$ej+=VwdR&Pqxb&d>JS>#erfO(N^Hw{BJp9jigNWa_#|!K!Fkjln z$mXM!GCTF2%N9)$gil#k&8uyd1L!jk4*Wy+o2Cwtx(t=GUB5n%KBiB`$dX3K{C@uk zqzxYujzJhPX$_VFddn>$^7`4cXP1w=kEW~4P_e+st4^O1;VJZu>{h2$1AwX#gj3^mIKd3BB1@CCG1k6*5yN;Vw;FXn?ndE~r z?0eq?@Eac{diz<>T+htRG~5%kZfy<@IoO(!UlqX+G)(?*o+i=GVwFkwNlgC>n(!p{ zL?mZ(@S$fp%8Ja;TT`-r?r5X~i~R+nVby{qtM_<_*7a=Pq9y$3iA(c#w{O+LQEmL( zF9jqA-{(njjPqQuCXqbx;tbTB=LYoZcm(^Q`^9Ssa3&sYmPIwZt<)CuVdRDEW4_B^Z=r1qB5^iZmG$7s__V2S64X z2t1J!ij4ri%~c0K%{K;1UY`6_htsPstm}$IcH=wdZ>0J2ODrFV%lh6w zKgw=~m?W}iZdIdd@l?$}pgZD1tbd3soR9$nW-@i2oR|z2$s#>66JlgFZc(a^o`CEu zPG_8*Xu*3BxY1tM*@o-b8|#pMbA+4cNZohn&Xtchu;ov*8R3EEh&aTNV%&kbudg`R z)WTR*v)&G=5*ZcC-})-?jWN_a>3*21m!R8VDXp%q?)=gc$*Z)qMX?Co*GmbXE<__N zDl3~dq~%Yx6ww1e-+L0n7ke@nhV0SdM3hXDAI+B1qRYOKhS4Vnp3VcLEWc6nDlqGX z%bjr|eUi^Xy73y6zaAiWx_tRE59RN3cGFbV` z*i?Jrjkt?h#azMm;gb>9vxAw3b;rrJ?Edr3q1nFK`|^udQdXy7mb{yy32oVZ9TLID99vJk!U>q;8x2B7ZebO{eVSMWBjGbU*IZ zmBE4WZr=1=A3=pTOINjh7~u7bZx9FF)@Q_$*XismL+f0|FraL{Aek(YZXFtc>f^m) zo=OM3KRkZ>lH&EGXeSUsoku^Dhet+6;u%Z1fy1!M%E8eN*%yGyq#OsEJlbb*$*Cks zRaR2E6LH&>mM$ItBOOuJJk{jGv}q}m(y^c5R^CYwu9oiO$1f)B^@J~8N`8fBY>MuW zf#cL2iG@;tMUixop50t7$yO6mS5vzsjQ<{1_~||$V}5=^I;*7n+vej-lBD>(#izM4 z&4U4hhQ_IWUjjB;_P=*~i3$gSV!zu#umOa#vcXhUhSubV@i~K0N=ghU;79ShAlEhg z^J}HAU*UYs*Xd*9qV(utdz5S2s|vJPH^|P-)}=u}S5kku54((pdA(fS{_?ZO=o$9i z_yd+RC;k_nynE!EQv$h(ZcKeKmFn@QQ@tkXyQN-Simd6oyYefZ6H`q;Tk}hbTVO?_ zE&^W4@_Y5vCm4mFs2v`i9PW@nUpkx}SS|$$f|w_F^X5(YM32+v1KTQT-2yhU9 z_NjWU2(uinlYE|3bVK^R?_J&ZGi5zNnm6WjOsc+tNff{Q86%6Y4 zsOkrV&}IGj5qnam^*5xlOvCZq6!c_^s=2{NHC1@iLL`NYI=(6WgO}6qCd+uS z1@iEyf`UlD$pGvUta47HUf$Xte|Efie3Cpj;Wx!E_^Z(e0bKAh>cnTr()e~RaJiMu z7ghH+CT`03mKQ4)CJo%mWn&YN_S>*HyV40t#*s#!GVCYT;4LCTdeHWuZ2I#rt^AN6 z>=buHL&G?D3`;jaG`v8z?muqL1t6z5!PffV<35?qztXMPntu82Qo}Nc>MVA5Mv+ft@X^Phtc_~3PAe^B znz2Ts#^$>*7Yc!>b0Z@!KtPg>nL696b7IX(OEao4dV%fodH4&!X}>$A08F@nrIP#m zYRD_(>~M*;^<(`^G-Gfs9F(B|Cph1(4?90QmHqoeabXPH5UL-?U7Q=p1Hb@M8vysn zZlOtN6X@^i2QUokpwQQnFno9KzI^;Onka#el-|n!Hlw)n2c062wJzlbC4fY5A0!w? z2Pz%b024Soo;tT`JzXiQ5_TE=d^yDA%K()>NTvu53$#1(c0sBh#?K0I1~6MTsHzN; zC4r{N?*7(4*-i}O3)?KpaxE1qEESbMROTE%`*Jy27Eh8z<$rcu4;-!llfi4jT}Cc= zkID3Tb{1JUqudRj(~J6l@<)l&ce{Sk{x*`E{JdXHS$W@*P3$@=tMyFcO55qWn?=Av zDhn3z5homW7EH)!K~4Mu??Jz!x73%@?XU&VA+k97A61FI5 z!9txksQTWU$pTnOA5zRN zlunnf_-HWZWMq7o1L&&8k|VDCCUCX9r*+Z zWC82FIoa~S__`ZQ&>E~rGYbQdSO3|U+)#jARaaL#kJl&Vg`G9XmQd5tVGvaf_ zt>j4i@BpaGChD#Ww^p`VnNfw))~`p3#y)h((@k|C+OUQCblol98u#k==;p7}v9Qq4 z)uje43ww}OFW7-nwjOvDLbPlDyZnoUZdGQpS<%~D6cm|U5Rm79OOY?pDKf6I>dBUR zeN)8VpsWwL41k0Qr6);rfUiqu3%0E%dr5YU;MS)4Xv7 zOk5&u!!8Zo(Z18sFw5m1%`fxVDYY;eeQ3XXB1|<27Pb{Rs|(kLLM>Vwz3Q*ln>O6k zSO{$96lxx1zy6yg@Xzg|z4dNTKxsXEco`tue3p$Vg^wn7e|SO(Dfs;+L^pe)Y&!_l z-&#YCzxVc5c=m4$SI@z?g&?2`|C*?9_I)AUx({>#s&Sv(XCBm%xC0CzcYiXds-2Q+ zG8^cil8TDwm`h!C(7tP3KRr1)c{8XiH8Om3$l&!U=N<5*GBPye63_BH^`;wExoCivc0Ynyaxdl*q-eu zC@MH2pN7;Ay{D7on2j}KUUt> zQZkAoK{dAxg31<<*C*SK%fQ#%^Y@no^)r$|EEWK*DWK@9!1zqLf4xF@EWLGb62OAr z1;5<_{T-O&PY&0H1v;}jSIfCTkd&O@FQ|alvg906#L41?sPpbSikIXV-N-^;sbb@? ziwRQsV<`Rer_@O{1_pXviq_=|>ruH)niGK!{m%SL&(Cgs-~~v#04pE;)6KHDNYH-f zu(_&QB+@csc5ES*uuAOg;oAD7Vpp;~7q-ax*7I)`H+>6{R!j8&4Se7h)Ihcs?~(Q zjmu>lgH{E^NQ>ORu&ax?OjC3`)U%hrhPo`zvCKu6#(? 
zHoA^U-D=!2XFlqe_^peoWml|sE~Uk{){zZcIZpKptip-v$SL6(!U^*i(I-xrJ8Ygk zpFVv$01Xj{s__KSHd0A8a+Oqbn-Bk*`UW~cE|0uwx5mr)=V=XdoZOug=}r!jR)1Y5 z3`oR`Y>|${!51mLTX#RA_6Am1+$vC*jDGhXy`SAsksdSiLYgL+J@iL+!(nE{@8ChR zrU7Wp)jwA(o*)~(nytG29=t2CDpJN+9P_<85EOl#ogO{2!2pL$*Dm9kEyb^`y}K_d z;D0TegN^N{r1NNAM?io7(#uWpMop{n5PQMNZ0SgOm)Vr&58t3P5$xnbKM!&GxY6T; z_odt(ln3NnNLW$(@1nkShwQA5KKzF4Ot9vKQVr%O$mPo?9Ssejfv)sTyVh;0PvLAk z5x>5o(ZUn|nsoaH=ydYb7B6h=3l8Du0{3fzYBggPXLgP}_4?C76UwauSnn`0Z*or? z!ZoY~7bq%FjJ$2j&T`w5?h@{7LQ~M4I|Z+#%q;y`$=R{QX_JQT=!TO0==|Cm z6pE$a|3PFKN=t#&O zOh2y_c@SfuYnw`-#yI|!H?7T52s>d^PF2McvweL~AF4rL$uMGCA>f2IGhfv<#En*Y ze10pLCLJ3=pA9<7+5qsX+Lh*w7vt08H#D<(N(opqNf@|8pg`EsQ_i?r2ZIp@%4~(V z%o2Iy@qOwrq8KgE!k>@%`S4}cHIV#)w=w~<@>uSs81cGO)z$iEODDG}7{a~pL>vB{ zK(;8=1BEAbk_wkxQ+Sop`a)G+1MA9kiWUFiaYKmX>uc-2ZGgj|cI)XiZNf6!{Vx`i zR`t*8=+|!5eJtoYi{zRa_b}7uqFN6pZ+8;~Pb~1zrzbEGNS$|czsjDili~`VB&S(U zS6iePK;;`BOZLLqPoX)4r|y;zOi_&lUSYSr_ndDm;FUwzsD|(T>LI))+k=3kpF|I> z2{_0@DOeopj6C z!p!Wkj1oVAeY8?_GtMtS(<(9=irIJVWYu0*@1HLUMQVWmn;$mlvi|RIM^81o9}i zD#m-JnaG8l0+aBbKv5Yq`0v(B{PvjpmZqlMrn{&18gs7GzD2}ATXV#@?i51_Pb?|X z>wN&$>$TlTPx1UoQ}bPrXV;|2yxSzV`QQSlAmMFaK8zEyL-V};F|0gBbJM;8U$LLj zELmv$NPy{gg2)jU{MnW{If3qyUkD={`B^q9t>wi|W(_R0E%;NHOXl9o0a=fkZOZkZ zaJ>fj$+3%BMEuXSKbB`7SE=Z~1 zehu!E4MN&YaQMsGm6CbdhCIV>hMpETtS6*n7ps#(V|2iFIX(hZyyQ3Jd5jR@hm=m( z_?yC2TdlC^y!(14vjHke!qrKSy|rTBxs6G>LdOgVj16i>(b$L1sr$bPe6(*=t(G;3 zWA=Re(zH&QxfO5l(<4~eXD?eFEqVfHNN;F7WF()CKv(kt6cP{+cQcxWNCUaC10oG~ z+hnSl*&DeIr5+GuIIU-5vDrWu74t-P&wBB zCZ*@{F{!WGbT-W>=^f#JFJ7v>82!%~qOfM&G*d3q4W{O@Y0JO3`Vy%Lm-buXVyc-< zWK_mmrg7N=Js$A>!M~I5bF5y|Cq2GxAzc5Ql^xg~bx9ac9r_YxgZT7DE$-T`Hj@!%VYl{wkKsds(dQFgS*>z2jA>sxR*B~kx9=*b@|>Cf?#&dAFHcJ zt*iZWLsHUtd}Fvb6d!vm&_TaQgmV+pbGG|@Z66|6 zC#|~1<4ah&EyO=RPo^h45&QO|5}d$8WI~<1zpipJrTt|7Mj)S;XI0vGm=UXj3MW|f zbL$2zf{x&T7`g@SECNkE81iGB;1Vsf!e_8&Tct9$l={C=yQ z69;pSa;G;^!6LOeT@&Ed>~9lp;mIQ>@!}Ii!jN#MhwgZgksBBD6h`}>*hK|Wn_m5U z^QBj**5AU?Hx{uzWF{ZQCg2SM{I>P>2K2zR1)d&1js;H@y`Y?0q#LjEPZ^uW+im+o zT;y5M&gW;=GKLr#2IgB>7ULG4fZ(E?I6}V1z zs+?a#|J}Tr?n^U*G$HP{MDnIuK8G_eHyG|Izj)zkbdJj&tP+Cj^nRq&~P!Dkhmm8Z@9ZshaG!f0lGBj8>XRjht)zMyVYgxlnJoTz|!{wGyM5s zO{0IIRDv!b+iw1Z$fd#6nLdycdniRPG3sp2VVZxe(P&_Or!M|D23K@M9BuPjJcwUpm1{Co{_q{YP9A?FgQmjBEFu zqd`3@JHbyRPBUuS$9M+XvVo3iu1Va1D@2V+tpH-E{I!;xXn|66VZ z+SKmy-T!E{Rxaat9ddc)ww(#jS-Qu;b%a5O#GYm`64WQg!4QMK^wU9_;Dx^Be+^L9 z2ymgk`(g*gt_kUcu5a$0}D@wTG}8{@tY{e0*N zA}m=FjVT7^@CavS&o|k@Ze77%_O`z3tn;h1d+!GU?}xGE|7nfhm}q<{SSV9in8w{Y zhTf^EtDiWZoxh(wATiSa&kWQo|K%Sg694N11vlBV2FZEflqzwc&|3Y~)t^N0QEHZD zvhhv@1Z`Cl0z!N|StL4n>O)o|Q4q){(dzd|fMq=5br6b!Lut;Ea9&Xk+$hg;|5Acw zUx+`=Jha$7vc+xm{x#e9q8^eV{HZ37z?P(@4l5f!A{GS=$(Rc-2ZOE>dMQlbdA3Z7 z9_dl;Yp;9-W_1evr(6f-3pjen%)2imD@MRmCXx1;b&s}WmQQ7M7?B`vbU*BozJz;zqob-m(x}F}h9qFx-I%fjk7o zN+;Fr*ceghen{ge0{`CFQ-AV}bYz-`?it0y71a2+1p%faH1yB|?PTbTQhr|m4mKPv zX2Jfo`WpQE473K)56&s%mgoYjBebb04+cIb<+6M|U>1E8kwk z`N95dDs1(&=NRr?SVoJM^W$3kGtr2dO#u#LY3r$KT&rF$JWbjT?6IVRD(96>(5EFw&v(9V0G*RkpPsk(pF>PQ?OuU} zBOS>pJp`WTJn~xFkf=cEm+E3~Oz%47;&~A3jPff8;&IB8tA8#l5rc$0yto1-QyL%u zG_~z}SqeJlGF9LF#RG*154MDA0r?MKU_m)m{7wo?z&Sn+GP}wx2OFC^BR@?(?2fcL zqflcg;Cz&3gPfU-6Rg~W0UVF?mY5hy;DP6qD$hM6Y$CiWqU(9cUvGgSC|sZp7rMMZ zO;Dfjyye;GOILI_S`TF+boVOM9$rbfyZVZz+3^;25(JR2@>s>U|0Zr=CAAXbWQb$>qSh)2%70F`4)%!Pu*Nq zV82F&EF|pGG3GW0O@?1pmR7gLBo{4~R`H`(l$8q3jvP-zF{g-H?0kn^@b{*=iB z^^%HDVHyCr*pHqWm(cB`C<$@M@u9<+cGU4gFf6{`%zFV>FUv zHfPn%3?a9h9`M!fv{^cy5P;wCBob)js38U1b8O@ zDl0MM0M@ofussV^XfY3%GHz7)EM8BEV+$YmFau;v z+QD@Z{3TMZ-QScV@%U73;rECf%2#*vI_?%{|AT6Q&j6i&vUAXh>1yH)+3A%Chx;;x z1HtXgykG^~(pTXf=8D`6dgkPTB$tIT$pk_)I@Vl8{~1(bIbX 
zK>M3QHV}ur5OKiL<)o(Got;`Tnx?TKQoVlBGMpluBI3@pG8~D*Y592t>n|aE#rhWB z#3L`3W)ZN85}I8{y5w*UA@l(9x+*WykC^s(jM~S{CRi%A)v|v@E)!G((gkvUc>`Go zT)XepTSavqKYpx{E1TK*nwv~I1ujN|rRPdj?XaW_wn1jbus2X&J;6eE8c@F0g`ez5 zUK=kxq92M>_P#RfOqhpA$N(|SN$I(Npbhe_80H2ed;`A}pscj=yqD}##u3K0sxz)F zZ!A45cgnhxf;^^bMx%JAchTI*MWoxJZ$)LDFy`VeL z3iZtUTlLr&v4Ivyfnp3EEx?3+N6p7PV4AY&+1UXvd4_BKdU)ctYn50NNBQR!`X-iB zB)~+~mW>4Gt`!NoZLzVjP`N&Pe*;ZaWUa$)pAOI zMu8KgJ^BK3FFw|q0Lgj`DoxvXZKf|VG{64$DLq>7H1t|AYjNjA4oT2Ki3a5VYjn8! z>yDrSgvsz`s(Jm{NB@aL!WVfs5=JghisxIsoNV@-NEQfZMI%tFgXI-VgnvTid5#O7mqU|$vO=}SUoN55CsOq7R8oUNT>a6a$)xLTa514 zR97jr%Tc(A?^B>Wwj12|YO%j}J&!z2nEKuA?4M1XiB`jT^>=y&*uEeD zjfMZmh6ep&LeuTtyWHa*2Dm?fqfcp_P*AuuQExe_yCo&_(#c>$CRwvp0~nS8d?8VH zg&&LuG+X|Kfze)1NMB3g%zO%IsguGVNf~Shh)D^=eiw?5(6zs;di2VSY0BOky+6K) zF>Tel4#i|7pexX(eI}Q~`Oz=XUbs;U!57D+L6==wd}Zsq%#JYwxsQ~H-NNkU)7w71 z>Iv?L)9ycX?JAfXbGOKnY^;qb)O;ZN?sjhLM_O_gx8`k9yLbvD%mgvg;KdCZ9{9j) zRPDd_=-!h^uS>STHA!uLsq|>pEs51a^TB(FrzvbW-O&wI6z=c5?jBPeaU<4QsAgoZ zT2TIlu!vy1N$jW90+A#eU$)0n$u!FP`ay}NH}nY~$K%Y6F>p}XLT~#K!?`EPzTg1` zlVh40?W2*vC3#emd&vThlW05h=Q{RO4VWTi4tIiEp>R?ND`Y@&(e|8MsJkJA+?_wc zO|MlL&_yeH@dn0XISI#K+Pc={F-OWA5CTSftYEqBYi;0vDNH_}o;ge6u?|*gHh&Pf zx2pBAIx^|V(b^jsNcI2^wjT_tya_@~mgYkc1>!0b1eJej0TSg~vokWwHSH&9_Cg5) zqKbI#C)~z|=Xn4SoB!}+fj2@oPd19(RF^W_zQS_)(hvcCrjK8W~@Ms zC01XhCclN?Z;0Y7VCtG1a=FPjnft|ocYKL6OC@@HxtD#pbOd;rNR;noS~5)e^0q0u zsjtw;1mWc_Dz&++VxV6#Gztg_i!x0Zqi7v6ehX!qwl4x(6Zz|9948 z{`{%ITrW%9#MMuZ75rkD=BRvxI&s4N`qr|Y?@AmW2#EBHHZ$x`X0nhp7aG=%rlG6( zUV2-7dHi}~v2k%y{y(M%n{I+0|2-Uf*#D36C!9L{A1ZC-7n=etC)Rs z9BvflbEEyv65?ut{Kwq-zJ2-Ypc-drMrJ<{pCSXs&4RR*5_eq_q|B-Sr*9T|i;rfQ z@&wX6ZD%GUK=5I|Y{*R?bJYHu(r$4|(~f?(JT@UnpW z{8`pR7h~29NTZ_0TQD?o02yK<&MSagoIkjk3;_XoK0z4U1@EKPbG& z6k#p><{_X;fgw%#H-X9wub)drjsKk#4joC}Kw13yAhe-SM&+>gh)Y~pqk_L+fDjP; zL5k8%Ll4MB_J4l7z(g6JOz?p_0ek>KZ@AWl5e$yK{|8;)pRb_}Dh+-~&&o{w_Gu~i zxBex@`25|D^MFTIDIRG7H&p#z56H(3MXoZ%3^qJS>Kkf)ll$E)Xjl{9?sJ}wR_yEL zLJx|`ktI~}Po{lQ)h$+gjykpx7;0Qdw$Inm4F#*+!+d}qGs z1u;qc%pH^=KPGA6lU0uEZM&V@M#W8?0~rbIFCT|jqrZwt4V$g{Fj4Ie&`ZsZi5+K% z!!HIXbGq*DJ2GRZ`sEVtGt&SFQf+Iju7BX3JD{=-%G*z)BOq&2ZEae%1T6YZXP4k? zB5U~v{2A!0TxX_UIS+S2gDNkWiJ!$bnATg@s1}VVVfct!WQ70ppnSjV1JQtMh@?YT zo|Ka#)`=Ws)$ffpO1@Bor-OTn1AXhY^4@v`jnDd@J`0H=Uj8>KMiY10Ab?E=>KipJ zEiF{yU0%kUCFkZq0AHqSXzvy*88E583$Yb7rrf-d@!#uR(A#JZ8&|Q!%=zF6v+nBh zjsgC{EErc$w?S!(1#W22ld@9QR|H^_5)N=4x6@BKQjGd{1uBlF_@rf1j4Nf6k|vVJ znfTc%0pqTxE0y;UKn+k4dks_q;WNk|Ft=(MP3Rm9{ymIM;n{5Q+SPgA{=8}T;z1ZC zvdY8zb$tiaE4Hc~li`8pmprRKZ%l(9jvO+OP~!Dm%e#+G%o2zoa1DwCja9Q<>(?<% zshndX8TcbMFocVC!P}}WDYZ6*AVG&sbaPLIe5n8%av?tdc|2S`Swa7r33 zjjey0FsrFvVmFu@`&C!#{PdQzc$GT$OG3P#Q@=%nMKVqSCnbiV6vd=qA0x>`kK}b3h}X&=CboDLFx4v9KhYH>DBwP3 zd13kpX6e8A=G|v)=h>-grIl(xOrxf!SJl;}CeWS$k68rJ`SOAPY}NqdUf}aNP^K<| znUuGHt`CNn0NshM9t|?^3*qa_QihmkjKkHY;qObN+I(+U*BG@1Fjcjn&04`x{NwaiPK!C#GuxJ3mT&7??b*eytZ*76$(ShcG z9f&IU-3M%-XJRz$Iv|={FnlDUmD+|yqs@+U0F@#kQ$owTm6`mruYNGOdj-nZjycMU zF~Rlr0NM|eq3X`T@jl{_`)4F%_|nPhot4cCJ;E41NGrsM*ltJMmq@@{z^+2@>`BFh z*Nr96YzGa+28Tr+d5hTqy<9<87UeeI*5c3`)cpj;9-V;2cfkC_0vG@S#{O&o&&;qZ z7jVm(Pw?GLVJF*XR{;X9G;Wfm2a6ae_OS?OladbAwt<)+j3J6h*z<_(d)9BA``RgO z#I~NJd_9=4)#Fev*w)o+WU{kG!z$B!Aj8WF85A3D@K2@~cdf_?rChq^_#3nd1_Arr z@d@-?vZ@LwEowfJM#~07BR;F<48rUu7~OcCltgsT)|L%`pmrw*+mtM_lISD?j$hWr zFBAy!u8_V9$~jzjOR2xoV=~Sp&EVNlaI?V&HX>WpTjeP6{X1pWmo+d+&63orystT! 
z@{aG@T-jpZibd^(7e7?`)|9N*jZkna!-uBR!7L;iDQFVi6rG>k3!8W;s`i|W$|(5( z0EAdf%{e0mMDHarG0P2*;6|CztT~S~6S8`fHdr_I9Edih>8VTDsY0rYx-4*E3Uz|7 zU-+V-iRa2dX+g_0-tuO{T0h^1t=z^}ATRpT*pf+zev2*-EAVmk4rN6rp(a>}GQl;l zT@R*nu8Y5tA$&YS$XAxDFF7&{UMh84hk)0k6=8-nobFhGe;ddRLqP43EB znAR3r`$i=14Hh)dDwCTb8RD%$5H=de13;9d2YB_%garG3mMORvnCN%`rrbd3Zqn#o z!pa6XFx1+huVV5z$Ul8CGV*s7t}9g2W;$c!E5zE!q4tVHrl9+`08?fx7u$US(2HzR zIE$~NTiY^O1PwvSn!G0Uc%^xh-V7KCf}a3vxtQloDH5hU!33%jFlWGS%>CbMd|7+S zK28*Cp03CfpK045iTSoPuOu9H9ZDDNeWgx10&MazidvC_8Ppi{lyqLaMUqPz#JR+WM<@*rm}R^g`!d{P#_c`f8ev38yl z>#s6nL5R59qy;yO&~I_Wnd$Lx3XO8yiI{nmvLn_;icRrxPb^t5QR4Vco1P*J+9W5R zI49Q6RZ9DHLU z0jCexhd$JRh8$z?HP(`rY>N{1qtx+9_M_uW}HJ4;ph^OGGX)*V2|BmPlSj41|NC6Am< z*2_J5tRJ<1gnD#~@AuSlIDBa{rVRt5=mc5yK5aR`!Hq2TrhDgS!giL zk~$vw|9E@rsH(mwevn3_1Qeg7BBC@%H(~%Hs34v41f}EAjRh)#NViH$cZW(SU4nG; z=U@%!0OWrKJd^%MNgtM3;>r zKWSKiGw;1d&4t|JiGwAY#DyhHks+IAJ+P&lz6wQ$f@{*<$Q2?I5el4^!;{C zx1>jQO)47g74xVb)J)tVa(XdSGj^vgV{Ni=15mT;&|!@a4LzS1;cM>)JICX+V2T|hu}CNQ)8);nwDfd8eHZan0R}G z4flPT6G7&%zX1PQJ_Z3=0geq$;u|bPf^T9eEt{)o+`1(-yf^&`3Sb>(S8eOSdqCC? z3AH>F&t@c;tN-AGQ?Qzr>Uh-#aZkIpJjcd@ z+8chhUX;L(vbysAgl7SJsNV#tKA=40Ek;AfqS|h$)cmwdU_*+Hx4b04#A8LH(rx;k zhx-8viuIRzKraL4q!983l)3tLYI3sO55kM*Acun9Gmy0}0soYkkZ_(HKrj=O?tmSI zLn#dxUKYXE>{lg-Z+sh=UZF)@#L2<0@Kwo%KKYeeky_b6vWfolkuzNGz6r+XgVDA( zNlvtOo9Ac7G%2+e{WMHi}R@H}W$@ zp)12_g2#{o6p-A5K2jEf>#p|I%L`a7A*Uk z8?X>#+7?lV^Ev;m&hAPP|s_Nbu7v!CO`nm#m z^1dtyS@31!tm&h>6)xMU%1I1uJ376n-I|R9{xtMAKzRiZ6a!y8fFDF?zBN^sjJT2d z+8uXF9lkPyKTA#G}z#1BqiD4D3hMPCvxsO+UAD4yQ?LWi%#}O3)2f+7KuAJ<7 zCHCEKPdJ`a-r<|FoP!S%)jEakCt6{P(5w! z!Fxe4(4+YHJ~AjMN>E=mI2iO5xK`i9p!X#V)efn@`j>8PqzE4GwTc5+w0u~`@!55V z0(7Nn52eDUnVkNqX z{sTK#wqz8R>Y=_p9yYe4H|X(R0$jpFT#>Qad+R32o(r}KwW6f*_Gs5(s7~V#%Z1`N z4GpLGyWNi>d1Ad{-=xGYOTqO?lZ|mye9BkLui3uYOg0o$!s+0D_~SJ=8G|!{&-h z5x-hd7foQJoPL6Ik#YAjE~S%yKgaL_xW z^;`M2U({CwZtNtwhC zL_fMe#z&snxl3YiDt8tr;`hu12s!OhlUEq-%5$TZxoDPAL68HjzVPYsC9YhSN4 zts@1$e@{5$oR~*}8?ylI(zA*fc2)g2ncS?;7v&XU4Bc*&resOy}y4|8d-c zS2C^BD8CkjIk3OKqTv{n0``A&J)H;`ss> zccK&h{21UVY-ug`NM|9fV4D3RHc7#{ASz)B)+PN;B$QMq1NA8 z5dmF$4mJ>9ZfZax*5!Ol?5%8XclLB`LhBuRd~flr>u}nD08oR!Y@Q3`Vfyetg-C_c zYf;_!yCE11315SbuJFSxV>q*@Wf;3N_CM7szL!&@ruDo>blgLlt08~1@ky3tSS{pF zDJd#0Wy=G>%Oya9n*yHJAfYC%T{FdntFK01a>Y;r4vFv{BvdPW+J$C8wucp!_TF;B za~sKY&It0kFJ_Tzt(+P2 zk^h9?n8X3U4PW~En_a+$5uJUnq*ciMYNdDa`Hx-)NC(kUL9HaRpcx5pj8C60-jdan zWvzt?H~kB&;S9MPa2`i>7mC zO(rgg-cP512!!{d0Z=~cTjDo|3(IFaIl@VaiqTJYSOYB*j~#&{@kOAc0mN73`^Dh1 zt$bCUEPlWFT~(|kv*;jzpWc_L_IET9Cm@>(;ACdyxIE;dsZDyP9dz$(SG23d$}}^? 
z-#-Wphh^>cMAcjy2Es;TWjbSY`*Xb7Z1IqPcB`vZlhL=Iw~rX0S(Dh+XyREFl%IXz zuEy{CxZBMQXXzt9fA0W8!ehLKNGIv{teUAzfN``0`jOX2Xv4mzHk_3H$+0%wz%-&} zKmB`+QF+k}+>FoX>9re!n>tnTZr43T=Hr2;_jJjyUsbq7hPMS%O>PY`(H+}`*mDQv z`}KJimMp$9O-fq73R+)ogZ8v6wDC`L3xeM03TTDcM`oq}Z6ZOFZQ4ZCjs>=&I1@`}9AWZK5h#@W3bX^&u8L(X&qu8{# z_%Ji&88x)OJR-KvFq`Ptd~ePu%(0i+bD7Cv3IYY6iWE_{Cy>+BWER0j(HOKqvAWlE z9-62MbB|fMqJ>DF-|1s5FA7M;sFF37Jp6)DeXvB#h5NN8`p*0#$e}=MHZQFUM{XzX zZ;Zt~XOc&dUrmG}>ar4k3*9!;)+}KQYvHx48bL4}av@_gk)T8px6ogEi!!n8O~ZZ) z&D($dTdRcE9uJ9T(;p2qbmv$}ew_~-HUa>=s{H-?+&dom?s8o3VR}?xFJ<;Ot#QJ=_90cUTNdOX$_EAg2~q9ko>7@Yj{bk$Z}wx- z2FN?UzhI)d)m(Z5SR+Zm1XBTk7Ho~zxdfAFuSOUl`3Ixxy5C3j&lhn*{2W=u^A>6Xv zsQs(tHq&-*NLxwo+G9NjV|S=bGg3st&H_y&-9*ObJ|?EpW%H$%o@D8(6MQ(egvH*A z+PGwU6*g_4yjJAei=kWi0t;hzTL#`wj_F#6(4l*At9(BM@H+!yV>s6See-^c&fXQM zNFs~Ekw>S{{8UMYB4+3L+F<;z%zuuvY0n996$0Lkl?vMxTF@%n>k0l@3*NjRnCM;M zLjHh@33-;JP9{?bpdJ$L^`4-Z?~4gHzl9*{_Q#y!nE2)ME)6IF*b7F)22nKRl9NMR zPT|g2k}>tmR4!zt{f?>pDV(bpu`{M{U%{6{U?Xfe(q?GkX63Aj%se!P^Y_)Gfg?*L ziDD)j?wEU@Q8Yi+mZ(ufV!=QqneEJHzd~oJX08CwA@Nz32~Z8Bb%TF4HdhAya)Q`~ z(0mjop9WkabilZB{xPYF;WvE`wciqb*n(|wybD7qy(m?rO@QlpWcNHKBeSLT9@5bj zQd(MNLdSC;^Z|%VBki$d35tAM3nF()?Bge@?(g11y(TUcbcJ2`vo$yRQkPh;N-KIc zwC6P&)jmeeJmUE7yC=8p6xlx%o2)JrlZ|MXzS@kPZ+$EUC~dcEoReHE)Ge7s$+dqQ zB7^K4u3rjB6q2XQMY*JlD-v5+Me~~y*mB!m3gqWEwk12ie|0sq$Vqwl@H|Ubq_#!B zyJ*6N3W_{#Za=uHo8A~Y$i9o50-6taL{26!LfyxM&NdJ4?+d@n=V z3d)&1>3^@z;a7d0oE>6$+lCD7`POZlaB|-oM*k?y*j8tX{kb@|Qg|?6Ua!q=E@w5J;2MB%Bnb}nTKHFe7 zMl?K6Xi(@wigw7EiNs%G*~$#5+h%un!j<3Ie9LB5JpnG>L@2DDOCTm9@0Q(ovvd3_ z^hNA7rS1V>}edLldj8(f-k^Zl!|R6Y(J4}8uutUxJ@?1SIk-RaYf~sR@#4z z+u;4A+|tk!p5fM@h3$d`BOX`~7LKyYn!`E`_6>G_Vm;;P%ZvQgolVU;LTf+W;|1a< zh|B)9T4w>6qSD(!>`ULJ&?@N7QU#x-n5KpNLqWrf-<@(gPevd?h&`$$JNcGSGlXMX zj{}$e!oaK5iUf?*F$fq@{CfSl3)#GVNo%m((-k4oKd#$Yjz1sH;qN?{ZH07$iYmqW zv`qaNRTRD%E4R(if(E*qsf~gOf=<<4VfvU7{It;j18bWYz5*O?-WI>!m8h<^U-VJ) zn;gM4zNBEN6eYKp+zg@%pmPE<*&tHVpI=Ic;PPn|y;kz=vns5_T&xij^tiP zi~@-Mo=@k#Z={g8Ix)m8xm`ofwBu|>^-HF0t~r;d^$3NKEd3cWG7njVm)nhXb$AB5 z?SVEK2Z7yQt_yQkp_%8hR7J;Hw3?R1unZ)u zFJjIq51J|?e^i)uwple=WR%{J6H-oBsD=4VJ^+g?_OOoOEN5$lRHhP3NTAKHH?IcR z(L69us0om&pbE<;lPOUPpu+yR;O6QUkNGcG2;cEjZ2}C|hk|7DL13cp2d&RAG`*~P z`qFn^M#57xvV!B1A)11WO7CNz;=_XpnnR`_B$A+?XI&G@@KGkthv0NMH8&DioLs|p zpVTWT)wJvv1w2dNzdM5jMW`pgJd6>2>u^8GQ7*K)mO4EZ77P)Po`9FPx?7^&Sn2=n z%=EUJJHcfRWVtjXLwXbj@z%+P4e7fH3Hc2ZZ~W%8m-(`vcZO03uq|?_GyD$bOt*n5 zOt#P!t6>*x{`g9l4>AzMOC_PgeP4O9!xnol!49>20_LI2RN4h7nF3hzHAl{H&LBx!xS+VdUyq7Yu` zLIIVD<#%1{xpv)l!0wM%Akw)>@&Z=A2?QQmt9Fofpdh}v(Vr034qcJ$=QAN;hVE)1 zbolz8jb}%BeEFq5&yM7?TTy=#5`N>41M?04lde_^5RN@^T`m>~Cu#h9P6h}C`E1lk zur+rK(T)F@;U-x~sFM(|@5k^3!DuM7vm0&rT0+8NcR6F5xGR8yGFiXiMfANlRLqPj zHBKaC^Zx>92CkoDW;Cd{P7_RYn}jUA1aj5(bOQg z<1)+{X`Ywz>rc>@HP^tM>nEIYaS8uU4u{yC!@7KMj_<}M0#TGkr|~Y;w>gEoNpImv zy0Js(vl!L+xgO^y86G8>`5$Y#D~<7Y3sQoHFN^+$Hcr8gtyf$e=T}8hZsV(Fy{s>H z*~SZB(Ea%%r3*cy@cH8_EJdJ?k?xEDX!cvLL00@59hE1gV~d@uI=mQ)%sHRw^XCg7 z)-*;R;2BVYk462j&!X%x(BuLLHDG-8IE)wm#87I|~OfK5; zmG}#>`}KzfH~$#=hW;+Y!x7-r8r_ktEeIQ47u%I+$VPyxqP(zsBUU?u%&;rv!wjJ@iZ2nrLh!KAupq>wF#{)Vr3?f15458<^$Q=_Ou-`~HFlrC^s zqZH-|b4u}O(6-)RCALHQ$H@*`zw7^PE@M!RQhwzo?Vv^1~@~?Y6K`{asnh^wXi!Y76PUds0Qx&Q#Ul3bM zl}SKcKBh#v%5)#08g??B%9oPzg9az$x+c2j6dBtLY!zQh+uCAKG`Bn)L_vlZslpm( z_>oZKSI6dHxr{O@P<+iaemBXk%SMKTu2%uuOvhhlBGBy`TgPu`)MZ!iY5+EjlM3|gjWwTQxngUYh*$lcsyjfVRgyZZu7${q$ z&CBO5AP})~g}b7|Dk^w5dSmxZc&;u2UmC=^YXCj|%)K%N8Gf}gSq zzx(2zq4n*j5YZbb0bq~DoA`CDVt1o4!y~+6C3ECdP~}Y7oBBnC_Gu&QD}I;{T4e`( zGFhV!o4y{3GlqOq*JEXjY3xXMrGW#SuNEJ)h#VBYsK+);Qd6yoK>srbdRDt0ci;C- z-37#Tn{+if6u?PP#LC=5$nHM?^zt6y7b{)E@D&g(t>cPQ{LheomAB`*Eg_Psrb@O% 
z{Y^12=o@;2jgC+44y*sHNd9iUZ;MC#z8wuScfaye#q`0OAU-hGt5$9&cVPPZZJJ>k zvel`Ae3}HQ^J=|~JB)lrZ$l~{kvN<1Q=>-H!goPz?7}Z+@&bJYbOjx`>>fX*3y}j9 z8w|nx5G4M3$4xoUrb?N=&=~#koD`Y|q_@J=d^2xanQWO_IJ-m=QGkv7;p7lY5og3V z6;EMU3S*GZnM#42_s)YPCRjgoct^q!;_5o)Wm`wnz&!#|#W*ni*^#x`RciuC{EDsg zhH&ya)R$cScDW6XD14I6@j7|GkPg3&(k8FZIKcFVk?HG}s-g8-9q}-9uNO7=8 zza$NxCIA7it!2O#gtl}5hye={=_AAUkt2-|7S)T;f8xXRTUYB=Bmj@|{P_yzjiv|y z4n<04UQK0;co4IDxk9htB=hmaX=* zh06Y>mhH3w%i(piOx*r|OE17sD1Y1!gc=^l1LqEYf31wKcH5GYj|H?fH5(#nvWm3j zUBCN@eXi7^kbIl?MqYD_#|X!kJdpO%4#Tt<9uMSOB^NER$z{6i9O=7)GyvxBjv#wf zqYlr@%~JCoPeRUv@o86Gu~6M7p!y9x@JJ?Z<=vL*>2Szpox9`zLOcCM$e+%8hU5od;Y|uBx z+$GRw&sB|1vvQkrq`Xqvb&cdSp7h)Xxz84@!VqZkA|xL;5woIitfgq(J<1IP*Q}l( zD(J$ayCP4%OZJ%@IXV+U3!ovg#MXgZ5vKunRDFk_fgkU$SX=8>@4y?Twt@~+0q!hG zEIOd9e;|7M##Rp08Qn5g6MvY6UAej?*mp#i^0tTmrJ#1OF++;iC%*8!BvvSKH*zF@o`GQ4tg!YlqeCh298 zHBGs~GwE%yT{bKOd3yxeaxHWSXc)ipBN|NeF`!IU2Z}tKIx~~>@_cD%0hzndk?Sg@ zK~=@&#AFrK8#HA(V;(qetv!0^*hUjd5&&D=@{vOrki#QN0=~iS@?Z8_grw^yE@n@~ zK)8ab>Wj>&9u{OI3RjhlQ0Q+ytk@ZA81JwFo}&v>U#6^bdqJ~vFuKHk=P5XPVIIEtRl_Up z`{~?d5Fe6|QFxXcKYX(ku6%WGEigpzatR!1=_!1{KlAB-aZGwQIsKwzQ)^A6Zqy7! zN*3b70pX3&Knluf#AoX|@1-yQMFvD}X3M>RqjINi<-bz#r5w77e!3!4{z;;T_Vsqj zoaDeI$!tbN3pXZ1eU;C?^+K5iXVGLq`Yw{sCB-6~TEFeJ*I(~QzN<_RDro@rG{1DQ ze!{(RV{5*uMX%ZqzyblY1Cpm+Z8#rAe8>0y$~hk61=&@f5EW(AtO5xXqQPx-`KZmidD~!K+Zm>PraR+!P^~d$VXIZVp zU2!o6#I=GAf|CmNUsKS^rEIhwzkEZ`Lk;3HMaN6p%aAAU(4zFOM zED-RNcGRwE(#J(VIAi+;vvH0ugY9~5ug;hN)|3}ow3OIO4Z*ai1hF94K~7((;JKIE z%Y!!~CXyrEK<2ErNb#ivbHr|86}$lFXo?M}1-+3~+wa$Xsj>S44UuQnlEh0A?5t@e znvoGkULW8iY|F_TLzAB(fczR1`t2oj!zHu<9up>f5HR^KRE)BQ&qyv0Nh1BvvVSNH zSQ{EM46CX*EJF*lV(x`E>c6WAe*66~CAU?U{_+DBi1@fcrR>F2qRl!n)24=w!=!}- zDR}rkm#mvA&ki{QZ2rGrGRviN#KQ-&z#IM)nZRDu^YO^#!gK$~JP0H$vRb7YFrgE( zDeL^UjC?2sye^;mS|vD-4V-3>ejT2*I9yRe@$f1HNU5-8`LFR!ibbbRjHhOe_a^TO zdqCK?V@28kFFTDamOjf8KTi1XC$bT*Mo&a|@AQ>U+=G`$#nG2;0jCrq{l2$~eML{rZ}b)Qu55ku zA`57!-J?v_h@PQC>xt}WTR8XKtD@E_&n6FnkLkwAnG2L9?sx>YlGU^2!Vlo@)$l8_ zi`;TF5rMta*Z()}nU^fAzM%idFjKAFKh=6eX6651mYv`Rnl6lfAdhaT3E*FVu-6bt zhHim_UfAx~d;@d9OYi@kDauS~^hu~{18O6UzoDG#y80wazx%RDz?@OQ9<=Y@ZD3l# zY17`Tjq(zDe_zy~Ph&oIdF#imor8wEI|pyU=0+yB*SvEAgUg2Nhr95!AIE{zA(q?~ zcgAh+P(MP%_YAY+8=RcoG_1+xI(aF@S{7c%Df~Wy{ep}jQd_)7Pv$lx75fUkcxt(2 zi-Q(6lCrbHWqHc`h#S8hcuwj#)YyoEC7RcMEIy-8_?V#&u59)OvnQ=BB_D(6rAULh zA1P*ECauy)bNlziR&e79cyNgDfB6jh4)gR)NHIGebtf<09HV}&A0b-Oye&169Qep6 zUa3FGSHR~=o8j0&+)4z_bE5r?jE5gK*fg9r`d8L3B_vtsoY0O{->DtK*PE84ow!P%!H?YD+u3aI~vUERMtdqsDeB#)rkIH0H>;{7~MNuVYQP zhj5p=^vs#9T$w+6qy>vQ0c(%4iREt|eej*Ve-I-&%3CpGU1NoxQXab?CUQ_=Hx*N< zEUWcJ=GT5cPIqj=p!gV-A*En>%8v?BW}PPKZV_KwUXu)ukna{5^@cDgKagv1p&eeK zZ}{e3l_0YsOZ??p=|zT175OJ;c-?N$;AJ|7i_I>{WQB{@7|`fv_jNNhNv^1xM*7|S zRe6Nx5YApPGbc+!X|n8;j&=4u_L%>L=TN)0lv*EL=ABE2eD|$VQEw4z2QRQvoVj(Y zPve6O-1EXHJewm!cs3}wjAKL4yxO(TZr)#5KC5M+POexPIh{XJw61~< z!2REb3mZq|ZLOHt#Fcf~0w38iCx_XgCg%kCa)+E9$m{DO@*ihoe3A{C%8n-(wVlWPDT8?y zu32w86Ag&=c@A?mZ-YAR^^YQ_^f5;+{|xt{nUtlFBYNp3VV#CZf-!dNOhQ(X*WF62CYb}JIt*dpdym*4QysuBDe6&xkd{jiVyzd~28Z`lawr3k<9%fz;xJvRcH} zB2Z-A)81vh(yA*Nv?Hq{T76g4t1mDXt=A1nV^d#Y@jO*6N`S7E=mwE>ZrkJV?}~kb zqVoZdv9KVH=aQw$2zN;Nf4xIuALP42o86uYHC+@nOkB83 zyY>?$K)Wl0oxvTuM|s!6UYS&{m(#F&;CpH(m>1)8+dcbng3i*{|Ha{;{3^sEq~*rt zi>T*Wo_#v*FrA)f#gWF6WjcgQw4($HyX;IHtdg^54Ge>3y$rqQyhbn288Aaz(_dtd zXMyVP4a43X&KwG3D#`%ES?kV;4z_Tj#!1Gt4yC)8Gdq+{kKZ@IG8zATPPM~{1Q8_!^LZfV5C@ag2P6+*gcZ|2KRX4LpY^2Hk ziv?Y5K8KYzHhD2fG@1sVZ2OU4c2>^?Sb@01D+?m9^0|f=@Gm2-&a3&S^jP1SUDQ}T zsypE7$t5ROPR^oVsBWTN+{PXy+DP-Pqn5^}*{}9@=?WViZNI?NvzHpV*TItkhyIVg zHq1CNXs>6<(x6+eSvQqZ5IE-Zv-Ou9zuCZ5=o35x{?GyZ>9V9)T_$f{aqqsAaa2#% 
zn`WN>CJ(U;TnqucFT({vBX`XN9Ajrq14uS*R_XA9Q%Se0BKJ)&cpb&pkF^;#&hpHl z^UHSBUYX8&^7kk#$86e7<4Iz?zWmd+uH=tgYP|daVxR@N?PrWs zk9>drQFaa5(W>4MBC#sWRusJTkaYrFI$jwGL$uE=hy-o&(lyIJsn8E3SGnb9+^Yu> z>2AtVq9oD2MmNqg0e02PxW$?SI|a?;?W%$$PrQsUQYq4}M@R=8{eZzFy`G~EkzaIo}@@+Iduc ztGjAsG_y#j&_&<&87=oC0y5`<6BVo%sU&LFg?|2J;>uJBPr_D+y!Jy%2IW4kT$F!y zq0GRZSHIB8^4Iuuo;=9~Q*X_&T+9g@OrwOSo_apS`qJ1C2jzu6JddTr%KJWQLyHd^ zI)naBMD}K8A-gjd#D{idch-YIT!ZY+$CdwccNU!yDi*z0dPK@ch9a(d5g*DRfG96^__ZInTy`O-M&|9TWh@{ z3};UF<{TwQrWY{u&UVtF3=Uq_u5Al~1FlxbW9zbL-wFq-A_DWyD(Tam5R2?g`mix< zU>Rp|3VVVGBkO_#;ws=WxI65Gr|O%ZubA~OCug4y&rYH>l^p0X7A@=T zwB?%q1OH>T*(aXb8%7Bc?SsZQPW6 zQ)@bQm7z7%9v@}!>Ulu=oOdr{?|_Q*d@PQhA6j~zl|hY%o5&y^47XZKe?mj0*=IzQ z$XDsdT2?2hmkS96=<1=+Oax4})`11RZQG9xV9{MdN8^{^U?)(IrSblewh+aGeM-(f z2%@T3nEsNm-mXI+kbyJ=*4+QfdryU!ktFrb|6eZjKk*oSdSEwV?PxEHIvYyB#{d`O z^XSVPjAvL;@#RlHdYsGqKKLt~`@ZE~1tuZ4WsK~;yn?iP&4@3Twc}O~>_70&d>LWx zRR|~c^rg_DMjFs?rO46*_*8}9L-HvP{yx`v#o#U3CDr1n`K-(*@F?Ky@X7pyK@P#a z$wwoU#el>wwCLkG{n;leYjlBer$5qSK5YOF*w6?;n7t)FpH=3T<$+q2~pG0Q$ja z{uv&0iwC+z`uPL)hDgJY{6X~Sw|$4Hz!^%{-TL~U&@Bv@`2gM|n2H#Iq~|kJ)9{*z zn3!fDt83*yBLSg}3I4`s|9603ojsQbx6dHz_#@285jPwEe?o%k_>U-Da9E@U8*lHT9D@U9B(QltL6Yw5qw$O3?EgITM;7!nnw2nH{dGKYLMhwG#=$Gcbi z4BWYKz!^5{V=mLF|DG*-AiB2yEbDhjSgz z8B7@qdaa$*{)W}w!0vDK4#enf!?x^!&qX-lllqar3YvA+6LTLEg%r-LO4(Ky30J7* ziPaV~MEXY`!{HB$V-|={u3lc0qxN7&G;B!3@77;nHkT}_)hc{J1*#(WV?Y7Q{#1Db zagFK#SJ=Liz!}moGoHrb2_T}^>co6VtiP>&RN|hN%0==ScSzecRbX!IA=Xr6OpOE;y|cDoW2W9%#J!2%hBG&U`-+OIAixN#RPYSgr6D2~526L%f%OL!J-rgxM}r^J zc&M7nxQ+J#Jvg7(xmy|-Fd-6?K)~Zny!Y`Nu)$zepM#$U*K@?z7}5D+=gK^)5d(uF zR}d0J^nw&sRM^1z9X{6%aTo$VaPR)!oj{FgS`-!2etr3IMb-TGjG=H65|Z(kq&&8s z7I&15`5RDU4A>brizPYd>DUz;TkH5q$+n_+CRrQoj9*)lpM(wQ(J4ar(hWCNR3xFO z*HnY&DvMAFtR4fGaYCM|O;zx!QS$JR93LM?4B@?w_7U=NzU|k+ljBB9&|eq=*Pu-> zng;EQiHQkOSahY}I^3W8^wrAQA;g+F>UsRr&DqYv7o9J`@&&A!o4{1FAyd1+eDC_X z^s;?EqPGxL6{>bXcf=m-S8Z3vesCOYFAq15oY<^XPflev9S3=5FL@SRiy0v`yK#k3 zPEOv9$hB4Iy{ba z7bz%Ea|#)b>mOb@*eW-~LbY0II}S7StMX29`f=4fa1IY?&^pLYg12E=(C zmW-K{2#6Ab-Q;b1`Ii^56mpt0+uB$hiymItV9$WijPejV=(1n^>&btLAVjX)j z{`%3hT;xQ_iQ?=0^r$34L>3{2oJkQ;kE zRxn#P|ei z#BGC1u`Mv;yZ+6_%(Libl$nF!vT;~*&r0!iD4r&M36bmZnK8C81_6mqu^sK#2Sra!#|X_0F@;P?&;HM~Ov9RZ#3kv}Ax#pq?DCKA@|OFG%K$M9@<~On@@rvoRzY{v**t@`f85D!2GaEISSV8YEh2o z$3g3gk9Bo*N5_}$P~16CR|u`0jcCcl8O^sYSmW*%{@}UrOJ*^);>P{2?>2S7dCN+J z9~mq}f$Da60yx+MH(kDB!;jGk1#hi??ibFVU;TYG=+ToWbuS4?U6zXavV<-jV z@|>1*m^@a0^iD$jiy-B#%+)XJFL=Ju#F@hfMz!BFZj-lOoV(r&*_HIPv=_te zzkkm@*or%9;_tSRnR*t*bP`YPd&^N>s%!);Y)kqmkXL(+@(Woc(IK8F`KWEUcXU4k zR!{A<`#xuY-kqeC(&K$6%zQCsDp-C!;pET{v{Th|b({5JU}Q-Gkb8$s7N74$$7KW0 zEp_l&9S8+=@yWOJ)?dDU^-VaOx=yul*zSG&3uXn8Pt@9G_d^Uwq84*HBerUb1?~2m zo$}osU&6w#iM!pVB->J2zEt6Z^-aLLUcd{dA_sFL?kiSDXtSQQ$B1w7uvhyBp7J!u z6InMS#Jw;J?e@Gqo>9=X(|DrpogSuD=)jXVIJ>Kp36<3+(PwcZIb?h{B1o{pI&Rd< zT;y-avJ@LGg=n|fjy!hA8mkwFu@CDdeeh0!82q%#g{~l!`KIO_o7dr( zz#5RS%W9N4=x-mm#(Ox7cy7@NaTr&D`h@qwaYXfI@|Qm!Xyut)Sq+coH6>7a3Vjwb z#P=4j@%7**Vm;s5Ff6_6q_~qL;tt;CIy<0nY!Akql%;L+^X9a?1}*Z;o^|n3?rCXh zU((G+FD8MmgvId5*elBI1Lc_N3Ep1+)*-`<^*-Jk^4Ms7+Pqh?H@e#(!qQ*|a?YTW9v9WEm9((=mI-y= zXfxoXe&?%kI>tI8>SM|ht1(NL;xVW5>jUA$t00ZI*Wd2d0QPfTDWo2v+>?hA8PmVi zKZs#wUb8O=Kv=WKVAy$hMsnQWGo-!s;mx{S}u zCCsQNMpEO{sNi|Cvg7+{XJauLt(NrF7ap6wDbviwzKu(ql)cV#{@q=~epzO}GYFEj z#~ptaUiOb}yVk$IkaZBkrhGOw^?I6u)h@7^{}ntR#k+%>!As}}){JOV#$(sJ<*I5S z8ds=l#dc&m18*hqvh(t$rkIXi`~otr!z<;*mi<2;%QMzLPjEDsuA;auju#ddR^guZGorN+XFq!F$>68~pjR9HeH5uE|30D#ntShC$dx@!CNq1blXz0! 
z4F!T;nq}UEa5sjf>xW#=Mp>hSXTIO4i>O7RzQ!ulY)8}_LeHrpv)lXkip{}d!i(7l zgF~I--CbRBD);7jIXOQpE-pGLmA>D6?IguFf#wxsa)(4U_za#*tb=`ituR=NANEe& zH8o{J{5@S^iX!5SaLs~3LgTT&#QIX`Utm7jBuTKW>dpo7M1;r3GnIW(5FrV#J=2Si zZ`5>t^d-YWUye=OADPJTX-cUD@k?EeUI!s8p<6Z8Y-0n*J!+`j*BrIxHAlN=whHwq zk0$&#F^FKs=zedBPRK>IRF&yKiDe_0f{O!18IETZn`dMqo@f@f>;DyAc^Jo-u9;Nv z;sP~Qu=UueGNT9cx$W!kPVMo)A`hpU^*2!bbx(HBntIp#U4yo#qobprblMYj!s9B| zE({*F#;1cMz;$CL4`d#`fBy~^@~*$ty+m{X-Zf~@rxmwv zWPMPE&P7s-&{P-|Im=khnDkq2jKxMqH6k~BVen@-^<1NM&Czx*!7O5237h3bC*!ef z>ja7NbHn>$>H&NA0PI5o=m?y5)u*F=uvyPm*AEvuqHa% zm*4K@7t+EtTWi1Kq168FxxTES19nJfd#g#buSL%Du3PZvardok8REiBcNeF$($>Wn z+gW#tZ#)qTK5nb=JO(|^OLpps*)I}^DTYM8XJibRo``2;WI)1UQzy>J!Eu9=bD`tt zZn=())|)#d0n)WCZ~X@UiSTG9wXdFN>}>TJj^>DiTb#(^F$AeVEwAyPZw?%py-gcH zN^!4u(#PiOE5v)yPkPheA!0w}GREjN>LOBj_g=tZav8$~XV76M6YHuijovH^0rR9* z+p_`^4K1-HQs-GR2Ytg$UM}A(5LSwDYjocG>+SCEX1&IQ@f{^J5G=5({Wvo2_L7w^ z76YbkMby~n{=2v4|D5q3k3YBOzv&)lmeAbPq*Z1=jXR2gWI5{~!DC&~@AH`-UNw8* zW~&v$dQX1fu>QkE3%rN#=(cJ<#jB#8cITUSVPox^*s~6HhMk#V0>x!tZL=|*R?wFy z0b4u>ko3h5$hD`WhvNXA7C*r|9VWkXT-icVR(P z`#xOcovArF;zi~;;>$}R%9aB-hN@ptwS%Tk@lmaU1skj6Tak16Csi1p9!Yx20voO0 z&aI0HrcR%uT7?TZ*S0AB;6?kch+*Yg_19!}!|xN9jGi{9OCljbRr-jEbp>^OB*Q7>~Bdo^xP4&WJ#YiYv`RLPBe;HeLm-^H+!` zkX&fku+qiuA8W(@*1}%R$w3WISh2CHEP zNOO>~t=2b-X$sp|Yp7%W!G_{P@(8-QFP#!chD6;LlT8xYUj6JutX8Skeb>$QJdR^+ zNi2r+XHNmK=%s%?Hs9K8@N>pvMklWrJHS|DhQJV2?^Ce!Ce;MOCbl zCB)lBKQ%SA3byLSTf0a>Q@PPbTIMhbrYU9n^BG_*tf|u;=QLmzx#z`S1E$$XhG-T3 zobVqgP9p}OAZjZY(Rx-Ib}gRr*>Am*5Ss&#qD#Nbq&a-;4X=kUgdqP(@4MyQf%yZ% zjqLA&X$O1|zk1N%UO!s#P5__O&*9+>KDhXiAfSRItVmF%5aq$@6=0TC#{@kv#+JnM zRgKSm)#5M_V%VQNN@KB{c!4<8uWXFlt7N%zsY)p$$73sG9LTe_GI6CT^@4bDRTt|E zF1X-{wuw2XW0q@ccxdN-A=*E^N6d#~U?soaywrpkwlZK^_@6v^;-GzS2v<|%nBE;H z;k=Gq_(SQvB;}}?8B=8CdL2$OJEvJ4|GZe;F+loDtPxDeU7;B?80HRxglN#2SzEL5 z^XHLz(FM0zSaxx+cHzfjew-L@6dwArb@h2*B}SkA{jNR)!N6xF<|Fu0zXx?>ei%7b zRaMw{DUs?28Yr@Z*|D%IPeA$WX5rilB57fbOzr{jvq*6V*2zf8v$eGay{~mdrVDDv zZ%W2BOT~jOu$cEC<4n)UnEdvEXTtkr$2&^kIR&VsWExaOgWbtk?BoZ-Ll#7#q*Y{e z@w58}hu#m&OD`~zjxRL);t^dDMA!x1#ZG;4*XVI$F*Z1i)|IgsUc99fmRSOu7r%!` zoGx!Y8IRGN3|BC@%UWuYUG^4Pg;0@#UvSHx>j^Di{g|3U+12=v!pl=5XFJQl1DcU= z`5j-!F9lCwu^YAc{|9Gp9aYsDy$cfpqJ)47D4=xbrqcifB&1ZNYg1Bg>DD8m2$Iq$ zDTttSvz4xmAR!?jAX}u77Ua&wIludTcZ~bjU1M;@kh58PtvBX7=QE#YPU^K*;lP-@ zJTAq9wd;A>>0M9>4l2p8!AUG&QvKc|>bU>5Nr1;3l_3&hW9EA-M~Y3M%d^{v1-2?ywQ_p!71F z>@(YL|LfnSTqZi8dF^<-989^-&(~pN{uen1;)a3RxtB0Lo+js_cQrnXCg$%es00Qj zKoe1Y@^M*NYy`+qAaSVVkNLjLzU$NqgReQR-AMPDPe!6$6VFlG_=JT1iHV+1Pfm|h z972`ac{uLpTIh6^J~y;OOJay9+D7d%zsZKIL(}!U6^@3f51@*^Ow(SkE72VX+6B>j zPk@X@W)iS9gkTFuP$%sF{WQ{FcW%xGTyRFo`wZM^ZbV0`M~6y^hnxD^^6M>|Ub-ZSkSOcx<-N*2u%EFgv7U%(DXhy6H z*pg%j1_3z9$;}mYnUKaQ?3jQBlF10%cYv~UbqAB?`vWg`p;+x+e{^6A#PXdT9hXt4 zI0(rw&_4GA^a*Z+SNAN$LI2v)2R}3rEILYh<|Me!@KBZj0Hyh@)FAz(5kq`Ikxh5# z;yhMF@XMFqJ@>^wi!Jsw<7%GO?gcilzf-*iphR;R^(t;WB3$j#gV`p$&f9w{%wx@dv)OJ**^**$H0Hg3(0ea6*4TTnKZe_3LUwvP#F>N z2s>(#G9RpWlm1@dpV{6ewxbPPO_=XxNF~I)zmS?TYN|UjKk=r%{7f!XamkT7t!csG zcrG?@QEd9tnUQ(l)TXdn2}mNuQ1O&F-}e%dQs6y>^ompSI-S;nQWV-Iz29P{j3h1S zx}3&@nbXn>88_fg`&n2Ph7{uL%m+BrxwNt#4n{PtyR+f@NBray}^Lp3ACk(hKagloB$k=>KdAy1Je0(q3FN&i^xTk#@<7t1| zzGr`6I5>ZN2i5m13>U7!huK7z_o%C?WLV-8dQ>nsZg3++!3CP$-sL`rlZarpMF7uq z`u6gD%u3Pq&=`i2=+p2=0Y+=;^4Vw9PKYufWkHpjt#2w?&t?=#;-fMR&g9y|L+0df zCaTL^8@>=&S3LQG&*6$R^Y6VaT^QGMO^{-z-Ixvg_bheR?9>I%n`p=|h>l|yt_ zB{US7noai1l_HB3S2Jo~K~~)Ej{}EeiuESyc>38y@s&sZ8Ugy4y5D80c(>p5)M)>! 
z=(9*SyBPaZ8~woyJ?22jfOj&N#^0^1$@#Wt+;rfBOBR(sg-DOp1WqztqqS?9Gr4=b z95?Cex)#408&1tnq?@PXk~1$|$PGu<)%g=ttP!o>*s)6|~cd>p3C zi=^-(&Mo^{9Df&^ZmGKtr_#Ylmn2?M$%?>Tbe|HTjc%spnGFzMzmDwv{rB{@SEeIu zpDg>D6Zo%xw$u$rer=_Nxk4Se%dmW$`v9}FwG!H3vbVECUMhCsOQ@}TVJISVJb5+r z#Tzmtg}E8ORmZ>gE8y8hyiS;pAv%58qOH)EUM^1~y)oe}lVdnk`(LL_HY?aP+++*8i zYjo1_Z{sFv#k@QwH19IXgXnh~9y29{j!nriLvBr*_TMLU$3M5|WEiz7y(GKDH!$; zIx?%tzTTHC#__DAhvnl4)y1LSe&p4H`U7eEct~hNj++rE*n6o63Kva$9O!Ko7&Xw; zwGC*&Gby-Vo>K6XC!_A>lHhG5%YsZ~%wOd=Zz#F7dA)*CtG?`YxQ^%TTP8+*i8GE} zju+k(I@#RuaSo$BFW5Gh|2y5fZT&|wd#xyx&ZSvcJvJU{mkg)A_9QkK-v6qvuI@pc zQ1RSgSfbR?n6V_F)URn#OFMkl_(rt#eg`21PUHKltI48rq|kj?d7tXo#xAh`x|nLz znEQ{H)XI-{9)5RgDk9s0L;qfhyqJXBjFB-3oV!bPA#K!l3n>it@So$?YxoBfl@iKT z`Et0lA}0lez-Q0jDJyvMwD0NL(bB4vl+Zk(I`eC3?t?8MY4RQ-Vb6PQ4a|IB4nb*9 zwgIDK(@V<^6RKk9abK{&(pa5RuFLVO?(OdREWX<=g4soRuW$Mp5Kvrv(Pe3k>Ab09 z<*j{Tp_6iKsWha3(^F>Pd-Qfl=&M-LIhL6&n-8vo{vQ%-ehMuXwjeg1cr z%_|zis|Cm}m~AEQ3%|=%d~dr#^td#ZF*aQTtwF@tNlu}5(`Gx#gz92k`)9%e(?BJ- zXUxj0<73wJ)rW%Qu3FXGXA?L2hs8YOhu&A@gl2X5K7IR(r@--N;+(Pw`vZL$8)d4p z8+f;Y!H;KcayL5yU?D2k>i>}u$vUfc0P?&BCmvA^#}Js1`co#I zupt-^xf3Fnj(NL-s&pj|+K}`3fLVEcSa1`DuFjqoJOi3OcruqiefdSZMe)wL5(*jd z_Av~u6wEKWJ}()B+T@GaTsdXEfs(er#bs1LK4;4j7gLn?fnOd9TAM&71OkAnjJL(b z=+RPJ0prR@crJYKswLVT+knj_d>QI9?vJXww3~<#vXNp&b(5L3yd+qcI&W?@cMVlH zAmbIpJ$v!gK_03PnOIUv=w%!e*eY|w)|6c(VM}02?~`5YQ2gzY-?-3*qAAl+cF`P; zHc=5&a=*`lCQpaW&)~D9wz(n?ukbZv^ca(3G)*D{vp>Hqy8GeLZ=76oO3DQ&p;`^z zu?&Rk1qB7)*V`}Q8~?0WLs?`~_qXOBgnd2`?J~b7WFVU$&}<)hYens*|Nc~u0E>4@ zKM1dzt69a z`gX2jsrZexmk)C*EJ=aFtT&~_#?}l8FWW4w{kx;6x-?jY6(USrySvrc1l4=*2~-0# z0FTDX${GT#?4AH{U2Y}^-glCSC6UyGTQlLe<{mDh5udQ`;OsPv#N@gj%8$yME{WX4 z6&>V`dq=G|^aLyKdZdG^S=~1^wnCo{{;^9-LhoM?9iHhK|{=tl&tCL$d=4YZj-mmQg*P7(x;WE&!$I6BhStY5=|p8Ph7pd zJEWf;Oa<(GaQRl7sQ>48o7wD6{o4bmWF(C*>|NrUHZ7Uh30WO5yGfhFP_4L@g>h*} z{zfrD5!wDSe)mQ-wnu=@eDzu4zV$;saabXld0fYj&Ex3kdG^kbzJbvVU0Y^9KL2Rg zW_xZJ~oZJ5e6O-z^& z%%y2hYjN|;4T4lTU+Uk7*)rX72Q4yH(syO>6IQ4{XQfX{cc-?0+8aXABrP#3E~LGs za_i)9KZRF5{BHhMGdxY{vv zw?OwK(UeX=L&lXLX*%yBx++FuimmOb$XnZ5{^XICSbK}OsR|#$ACDg3mc`2|ph&n& zb!zhHcWhON@2c*xKPgeJWx>JSa)UQ^4LRK-++k(<*Oon!8d>?CPN}l=t|-5-F{ZFq z^+HOmr`nAhb9Njk9{%JMo=*j@W>*-gbDf<1Dx54U=~d#6$2+UT{WLmJat(LQ~D z#MP1)Lmr1b#|pC#j_{(-|0Q|y(bNa5{(||h3**~kTDOsy2wOGrvv@;xc)57`xz5ho zSc<9tZ4)i}zUa{|iU(g}JNnePlu!{NjGd7^H|-{{58LQ%Uc{tb(YwafNxq)L)fSg> zHr14rbKB6spnS#7#^&LCfkS$|Vct|=j)d=_pT-H&A;T>T^lV6`Z`hOvZbQ5wa<%le}11n3w(}$ecJY8+sqQE*_o6MspT6i9_Rq@A%Yqlqov z8gnXH|9qW#9kTJI7`!%&o>9#MPW`;4i@YD)zF&jRPb#->pQkr*yZUmnsnT<5Y6;^0 zbRS0zZr2~?ihA^%M->73bphU^nzp(Ko}7#T#Qn$!aJW z)+m@_Cb@!O#_R_jYTB3CWkNRRyib+tmDk%V-7z)oeL`>60Xa-z>`Q6lN4V>>moD`s z+%bztcK>P_CTe-jEsF~72+x z*7_*$S`dF{=9h2b;)T(Bdd*?RH+}p9bYD^)sfD99P_f9U)8$iH*?aOKZbw{ZvDsPL z0ffE)`FJ;+sNeSeDA`{;F3>UTl`S5+T0u$ayA8e)Itp87k(^?!KvYjp0J|6;Kl}3z zd-lPN+}8%X%Qyay1m)F5(Xx;$-Omp7@e|u8*O%z=CSPS_#T|-4<{!RI9{H6?kE!BB zXmUDn+S8J%R;hH%jvxLpmx%n)C#lFedoThqLbt|0EBiPT|B!gTRQz?s&&t(ML1Ie_ zj;otlKCoregC=9RP6^xK|#& zwK($S>sN*cyhD^1C3i1*dfxr{KiNr^U1GA~-&e_g@lxEqR$*)3N$`pt^U(CWGeJAi zu?slpa4xn3J@e=MY=pMFUD%HF`$g!(GMQH}`r#o)bkXxpX?A(&OrdXO;^3{jdcEN3 zD8weMNr}rnc}!|2CN1}*Xi%BdF4yhzD^1lO`6zKY71R1$WAL&lP_vL(SX{#ZahC`S z$^TmUjQ=A;DT8o9K0kmA(zcSxaOe2_<0NIHS@L(L9||tSl4K@+-L6OW)9V)Jl142o zvb=UZ?TCPXxIj5b%w8(L`T_M#)!1z4tG|TGT=0;TBr>?)epD>+m00Vj2-flc`f9l1 zCs0_F{kp&Z>qJkef?*8*k`>}x;aL@4c;;2t?vgu;lX~|3NDuI7=$OhLj)A;W*TU>ko zpXKVm%Iw^#M$TphEsTZo?NH$e(fBn}$;0RCEX~h#&&4qONXxFzi^(dK1s5N@AN0aJ z?Wt#P128%U`%p+1LI#rL&a*aoh#LX_;Eh<pP$OCFm8PKR9 zsfl9^V;Xpkzdu$^9(-_cAQWx4y)???GZ%xbB{CN8V4&T+Ks-EN%;LMMQWw)yU3TZ; 
zbCPA&!AHJf)N**CQxD#bhqw~0V@=VX@t9%{k29#7Br6+lkG9XSP7%oy%n>ya7gL1W zGPz}YRc8bvTx&IR2wq~|k<1fhl9??{W9I#$`D@t$9!>*y<0=0lZ*j5R9UsAQ4aYam z_p?eXV<`Rjc;C6as{D{Vw$G(exX4>jod3GOuH7T)s+lh>F-o67N+f+^ljFfaEJIwT z_JeCoJ}{vdleASeuJxc(R6Zp`W&z6p3j4+`r4S)w{kua$ z%LW$d^-_HX^A{f>8iZiXAxm@N3(~m@I$$ub)Gi7&Lh>^Xx;IuHWT`y!K3H!Tg?!s= zq)UVYWQYOsr~5As!oZ7G@S{AJ?n=5$c)zOJ7rhlMOemT@jkzmNaKK?ef~(;|rADuI z#~bY?Ix2C$y36dAi*flFLMi|djZ+$C83CSD{en&ulaLY~iI=%bOBPC(O&WT_kJv@uP@+EZQtJay%|3&UcRV3tRFiwi}>hQ zZ;L%X_TXHYCu3sI>Rd~sxnxJtt8F{>UVw7(Mc`uH)S(c<>VNl@6ey3dOl6<} z5G)R>xu>7pa1z8XQ%3c7L*sggHr6R6Oivxuu&a~dO=6On2~DWdqFuEkANv1(*g!h* zJQOjZLj+zBd^y|jP49LXl-o71*iGq$klistGXkd_4BvrY5>w;0bM|DFQU#TjXqwW1o(x{z~=bx*Bu#(hV11S_pVMHMBUuYqBKP zV@!5Vm{aG5i(5{IPK`a;WBeRtlED4cFj|rd#WEb?0hip(oizpPPE-`U!_$>%GR( z@Pk}AtElpIM7nJS1i3b|8GHZ@ZQw%L6u%kv7kA(B>e5iR7<+#|kD}9R{wLFXR3A6H z%+&@?<*fT)8;Xppq7Z2VKEp2x9_RzmK~qvv5KjU2R2pRQOuWB8fzelyGeLj>nh-kg zNAuW|-KqV5N+!7m@Ng#S8;z(rcql+9vsCaV8-i>P-hPF1jWRp{qTQ=nxv)ZGC(?NN zaZJqu42g)iT!K5v0)^V|cf*Javoxrz7dNA>9j>2sjU$wkg9D%pdgMo04iGw;1y0ZS z3UeKHPF?ay$(|y{saP8+8GIo9J89+GN0_M66jF1dtgNW4LSH_oiv@t#-7pd~zFEjC zP+$p$HbFtUocJ;;4zVdMSCc6iUwoPtn3Cif-;oC7qGk8%0SPBtOlxZ^v}tTF2|9Fw z#;c+BkOu?P)-%5Nbc`ulxa0Gg*vr2{lr416TAv|z@bZECJMk26Zp8wEu49<)D%1ZQ zta#yZvp^g3sJ|* z@YcO=*p;VnR$5`Y5(ph4;8ge+o@THi7Mm>1^EfV_id9ZfbnJD6+F1%_*?4Fl0*&nH zvabQ`kn@uFtplXS8!xkK#3-Nd)sZ_LQbLScdp9IBkeh}P_-69y*RpM3#M678@gwiw z776`Q;JTN?-5QsfjJy>wDwgr*G9Jwlv)rrHU-~1DlUJ9O&6&w#P!~zHB7LnOr>`Q1 zD5Vx``JBeLc{|jiz6q;jF^-yI_pl!5?IcXG{L>Gc45*7F{?yWx41|M(7lXM&lw|RR z*$ieSvuWAyt3wmUHAYPgRD7J-ZHFikZ#bROuV67RLb$n8TZ==9Jp!CEu~@neEgdUm z?F-Eq|L&(~KRgP4qoFmpMnxlY8ENDct+35dHSypyFdY?*jEwrJ)FD*zjp?ebB#+$W zfPQ!GI66lez{)`L@wB59^L+jBO+Y61k)^tWH8P&ag=M%l$z%!rp9i9`WQXt~M=$johg zhx*M8Uu}-orGKQ@2Be^k;N!ZL+GqSF^|6RqQ~J9e&E(!f(@zC}pt5g7;Xe2iz82+n z=%95^B@#ty0kN`c7M!<;%FQD^FX7Yb%2Tf|>7gZ=y@}LF_(D=cj(pqA35CeGh=h|y zHSs1Y7Q~w8UcvJDIpq?R<2CN zD079JoF>BIrO5+w0y5@@XZ|CMaOYzGGtF;gb!-t`Nwv$|$kA4T?emlW z#GnWHkBgZr^Ti`0Wxjtxo(o-oEMQT84FY#g7sU_Tw7ys}fEZ5gGi?^zz%vD)yC*SD z-&B*v?*iGFM?iq$k32r@7!N-*F||ffY8Cg^M4sgod9)W909?1;9yGDw;<&<@7l3{7 zyNQZ0R*&@r{z!8=saLFn!L4`H8CP#5=-?-EHYsIO}>#CHPhQ>sVT3D&X)`;dw` zxw&Jae3*I%a*n;g_S}-whN;=UX|Z}bcysRSatKSWjEL%u#YtU}Dc7Tq79?qyUi9|K z#@C$0!PX&nY86TuFh^Nh2ftb+EW0VXG|< z5M0hFx@sczw5yI3C2$0*_9ZPT6no*GT%=!De}PxFK{vCXWt`}*&3aec7(lv~pRe=X zDeU>+gKRKyaGCOar7iq0QKm3Y%YaTKnDY+fr1?7??SoDvn)3@YZcgyPB_tW0d!aqo zsd@M$v)~^h+R9$z^FhK=Y+x4wPnFQK1KUpinCH{}#7uTjz;vaA8+aq1VIp=EvMc=&*SK9%W zJSDPo)#oVZR8_m`p4CwADr6Nbd1!NTR2(Qk@=MXvaS)I=z{d>8#$8~9*5+nzY%sc z!G02V7o7-DSLR5Z@Sc4>0Yo!#DXCea(vLV9gibPCY}yTiv|eCL_Q5H!$OEn0*w{W` zW!lFVpN*x-E&jfqeO9YoSt7&|?L|0cX+C2`xyI5sNk9Fk*-ui0B%CbJ9=(98dFv-T zOgufsozGsT!{rw(!Q2pn5nB2jf0w2C>>HU^WU9iaOqDqti)(-J{oTx0s^Ag=B}0#8 z7<1e7$41^4K-vas?M>wM9UL8Js+tZ3L0UB7-p<@lVGIi>>rGB=$i`SDN)n)(%NqR; zH3wlq_~~ zG#~8t`VMs1`rWSdmj-1G;Wq*+AlB(>Hvsl^Cho20{DJL%@SQqn8sXI$AsV#(N-Y&b z=P0_FNK0Z_)R|X!6%19Rt50-laLspGT3n4+NOth^$pZdbnM$GAw6`SNzG0SkPK%Z? 
zZF7q3FSVq7KcRihvSs^zxYrj%Y5^>ldLZ{%($OLWDPX1oiRpYWBsKDHSRtvquVuEb z<0slWCd2_%vr6fCk;V8eX7{hz;d4x}fSzSZkLx<6luT)=^`fI7D)0HuZ6k8q*r~rM zE|J`xB#`CS29`%d8^jSJsKCwaOEKx`%pd%r0ihDSmOBn8##t=kx8S|3`i%*Hn?npu ztt;lKj0zF`>Sxj)fv;axp!zj)w|3lk(ppx8U9%?paC>T{(aq{5lGuER!=*PmWasH* zPJYKT`Tl%OfDodMkGq0@t@QO{Jn{`PPQ`>p_deGXXQuBxq1ru{n6Cc2JpSEjW4>+q z*{UG-_eALPWd6-^0wjOO8(3TOJTVqLk6GDhZ)-et{qPpZhbK9A9C%aH(wJW7z*g8) z*%2NaoR`K;16e|aHzn#Bnkf6nIk{}QJ_p`9(fb7qvuVae<(pp`22sV+l7_J1V_@(>-;zET7u$)@_aWtc7ZO zE)lbqot*_kw5Na0C$y)4+quRjck`wBueHrXyNRk=1ZJXT7kii7P;$dPe<==d{8i_x zCGK5SH7+|IovwoXjHj3=oX%1Dw6jU#(kR$s+STVoYZEZV9X&XbjOLd~ zyOmW#sYarW8RvZ_CDslFjj8=dWvZmq5=8rCcIuM@sLWj>zpM$qy+>U{_LuPBqs$%V z)-&0~rbO2nDS!yI@%y(AhzMmMh%YE9!2>`%4?kSsE6x2T@6KP)(9oE< zbhJwba#`IdGvxFb3sM^WHt*f`CN0Tau|mZl`ysA@h>1#|`Q`y?kkqn5(fjI@pm+9l`yIM;q^i6rxM%mMBhTY% zsDnq*lBcB!lSs>prt~O^h|D~2g3q=88HgKucP;BGFlm?{bg+iKx9Ox!6f*64ThiDC zJU||yZLQAMZa=O9!k&x}8AazSQaJ+Jotwu`pW4D^j>`&FN$ZV`Uvy^Kiw zNB9g}GG}&#Ca@*u2f&_rhL4K4CjO_yt7nk)P_@q1Ap2|>%sv9}y!XH9{&}voQ=be4 z{_`oC<)$~BB0WndQBGoI7f=_6LpGXwiiUeG^xjw1Y2i`OL3l8-_;jyHRz<}O=$;5- zl=B}B;JS_ghWm+=;-z`9U6Kj78Qc@htytqs37bMrejx zsHErju9ad_s&yQXgVaCD0WeLUd?BE8$GPt>e0?-pY0EcKoeP~ z?d-a9XM7QJv-78KXA{i|K$v3Wc8K&*Gg%C7lPvB)O8_F1)kNsCq|6}+Ah8R^zpI>; zqU%?Hg77^su>Bi_L!L7-9+TFqMtBw3T!df;cIY2{<3%S`eLdN~mK-uNjKN?4T_O-) z-l};xHU&Z_t)6H-4pa6N@<^*ycN^{u)P$eF^4Kko+vUbj{(T?dPZA>DqwGQ2JL4)M zXyzEaa`N~V!I&rsxvufFi!pKMbM0$YVORvO-@q-kAP8Qjr!x;NzxY28IAQLEMj=#s z=0F_KUx~3-$(RS3SrG;b#cq4)yJZ~Z9U|+dhVa$ZFRn?WF)^o7M|H6y>(9k@y`|`*{|0FNzZs%c=`?)2;#&)nn+KZ#y})}F>NSCZHhuTE6g-C$2(`4UJK`Q{&1U@7`98c(q) zDP;l^SzBSt0FmmJJgo61xyk9#CEThP+{o6PP{veCYfmW&(Y_;;_9q&DrSVhi9x(2z7ejQ=%a zwfD3F=wKUuouBw_jj35_nN)fx?nq8SfS-7a_M~0o5jopI5Os=YctdJQa(WS0Bjqb} zZNULy$qo1*3}DdYiJ2emk01?t|07&$B`31_!U~-2zj5hDk~M7X99X~u-~Luu@dLBL z8F%FWII8rhTSMa5c5l6>8?M5ti9@--mXV z?oi5$-k_!21(9>_({ubhko~+2gAMfOcm$U_EmZx=gAq!@(k%F89J%?W&SrB_%|X6C zi%ygNnJ<(yys0^XcOHnAa004hN8n9hMT6284?oeK$mvH*;>Y*vZfQYvZw=TiU{{(7 z&)cnul-Zyz+Mq=*y=9UPJ3dOo5WBoQrh>s>%CE!M#kgz?6f!$Ep4HZQicfk>sw(_S z=zK*^aI$UEXu)J6&75yzvI0pw@puR=s&1%faJ7OOb3A;!FkJP!GtZD8gWI<&>&!s6 zDuYkE4w*=3{LBe_aUg0Z*2+Hb0{3g-CaenuP~-UaQCcvNKLfT>L#Gkb5Fm19$2;ICdM$)HF5c zp7Iw%r~;9Lue=$uB7KdwZ&%4kK9-r?$U3XF1BRP@#IJ~2TIsaf);6)9QF)J=Sasc_ z*q@G69TpIdhO2qEwl=P0K@!>-Cu~|94`iNT&jtbLX@3$6B@Fy7BYhGUTr!u6096u~ ztI*M!9AyJToOo9EDt0fszI_Wt zq7+rI)TDQI?;e=X3#i#Q*-?%=X`tI6xOMByF7qR}=cXjGpXxs>pUBA9yhsa1{tg)V zqmNRVtzJD#XHnfEWpCFgSqZ%rJ1~lP1(($b0?vKJ=5y2|S_6O`zGVJIbliv(3iR?; z3j?_XY3^-|%Nb=r(I*I{Vjjzewu9knBf!y_({U}rhfmcH&-(+)jqmws+?`(;KF`PD zMR12884G!%cdXt-v*P{@sM`-+?P)26-v1#%wjpij&>iz$g`J7(Dk>z%S&-@kVK7w+ z7C=~f5flA%?jL*U+>!IiNk(Hai!&4xYU%TEXmc+;Gp?~PK!&N(A`#m5SUMZaZee_4;GeFi3nTvG_0@s#{V-d!Aa;@^8LHM&uWqi*3Nw zkT4gq6IN5~UJxPgz)IO{8BS~CCw`sm%Y0Ol|Fa(}?w{$D5@L$=!?NRQ=;aQZ;k4d0 zD$7n2p8746;PLUVO=uYvW0y?WcJFs~g7Zb=jhljKNhY2XfHBzspG7x{r#dtb&2><{ zjPDsKR!DItMrEA^<-t-3K#Cb)=z}*lHeQXiM=^K9lNujq9Xz*LAb6WNZE_*+>mj0j z>$UvVNoHmG%mYAkD8a$Ov4+$(u*0PdJ0XQRpw`DU zlopVF<(D+7JT1HG#^?qa);2jaM|2;dD-kMWv3E%r0N1O9wP%v>%SW9uJEl!eH+$|A zzGvznS`jbnyc$}2e&(iu{8HN6)76!OJ%68;?6k_e))9E;#3_}+e%D{U&odJnmIy#& zCCbv#N?zC$!vPRl_F1F8FH!`ws6n8u=Ij`k+NdvbyoR)rL+YH$GvtUPr{mhq9Lp3o zC|>uQGz{!KO^}8e!Wsr{1t}>j2&%uZn%>7gGaHyiue!;*dA<;%m&316#@4Sz{}>O< zZ#>HeZKq-+C07dOvdG4{g$at~UHqFg%%Bz|?d%{Se5V&ARqUXA0F-okWs8)(0gBOq z4s(C9Us8$Rq|xMeZd^_4$1Wedaj0vWE#bHQdNZcDhGy;tH ztyMc7{98SyM|%=Y@wrrWB9*4A(&Q3pBvrMfuv(2L7?6;P$?p?UPZ$hhcfLL(@Bo9O zqzg23)TR4Q7#xM!;m7`ozX=N*Ejvw0_&-xR0;2q15z)s$9RyvZ!Rxc_WTtHut<~<% zvaFgX4Tb*U4g8a-%(EC2@BmwJSYY>0vPW%Gak5ssR5U3z1*d3^yadTx&s)0Vo@2Rp 
zg51PsPw%htJqHH3l_~~;wBvyrdGqdF?wbxN4YmQXjQ@X)ksJ47-+gusQbz9GUXKsB zkau51YHwL-v=d(`5O)DLjT?rZ?H`u6bUdqM6dznuoF(0!nj-;jyJ!mu9$29e~wXoO^dL67e?jR%Mr~lE|ClLT&++Tvz~|mr#3^fs@@iRfNbM&`I_r z*EvcjS&~CGq~#3s^gy4ucqMBSdT^}j8XJ$-W}Lyf`WqYeTEx(q}x`xHM$1Nzht~O$A_Pojme;Y z3NR)U^Mwb1uGBT`udlp-Eb24-b5=#|rp1a=5iOAxZ(saudIJOVU!er3DHfVZS5{s= z2+lFWYWx2g63*7Vr>Jg6($xDBhb)McDKJuMjdye&0 z{CP#;39hE_){`EAbKWdTKhpObFe@)<09lL)d;_^u-~w&m9{#->TPd?W_mA{n@4v4> zNYOjJgwsAZVQ0FxRl!m}E{OoxRqF(U_Iclj|@GO24ai zD%XOJ5K^sahgemn*GX_V#hXI9Rc?!C zEnP$j7%ZD~NY<#%Qdr5)^PJtp)_J8sU&F6ZR^kECjmW;$k0<@8j~BY>lN@|A(3Xis z5Eh;MhT8wNAVB{SwkJvyg?FZYQa!YMC9W#yrPRabrYM7Hp{9LTYTZ7;No%3CI*4Z_ zsjBs-Z6)}Uk8PkyLPCzk*N-l2>Oj`K!BsT6zV1nMWx|ty zD8_@0B?p^)uI*d#4bMVfsP&wbjmWP>=0p}$>SP!^E;lbPluk!JCe@`j!a7AFd`m_Na^ z7LsfFmrvW1HsaHqDD>I|v8|JT`KCv(b8gN_7#BmbAOkoUInyxrCyjf#<+^-a^+o#v z-#?H26&l|63#}Es8yF{L=j(Jqeyb>!Ks!|CIj{S+Yrr5BlJ-A4C4-g+x_HY1 zbtV;P)ZmOzQQN#^q?{uqI~$3DiVK{`*n$3?2y~&GfINaV{|&V!lA(A)DrnkOi+L#U zCMdT8h$H>!%!$}8@em~v1yRL4Tr0`%=!qf_BEyk!5ijg=oeH3=ZkDOq4}Ju7N)+=O zKO}>RcWYLBGUBdU4$T!L2;X?)f!$ADdkJBsDu^XI<}p^E7-@5X#%lRF1pM z!jF@eGZY43&BUkqJ=QLVAf0}F>(y7+$0QIhEQIEEqSz3(Xo67d;t|u_3ky@8QEwV zc*mR)S*8MV>4o|aCVn%@w6n9dqSqD#{xc=Y|9y=bnGD|Bqu1rUfB9ql9hcQxI?rfo z3Yf%s;zairm?YdUheh?UDi{UvwX-$w!&@CX>K;o`Vdo)ozTzXi1_HK(bzPDdNL|#H z4bSGLTAJ_3d5cW^@v=e&&Msj@a&2zlTHxNNl=nYoD6~^#lYHnllBELA=)W@ZHw!!EcL&s-thnyqW|I>cP0tZ%q>#|W$AoRSn!Q0T^X8)(xS#VMY8DNP=IUm1yF4i$L_VsU3}_& z821gtdRf=|E&@BzwTH3yV4)cl6if3woVSsoZYJq@^!gm4^aQ_-3eWVT;yFmk+q&O+ zfn@aDGpUu_4>B9KHgtU)B0JQrm0kGqgIstpZSyqr`Aziu7m-IrB73;hR_}nouJ+qG z|N4fsq491ca06`=!ZQ-;d6556^Fmr@gL!6WhkUOCD;QM@6eZFR3StkRp{?ijdkZXP zM>bM#t6rA5c#%+Qkx5=Xf)_OLnec9u^|PT@;SB)EAiSvHwX6f^U=ChIbB0FX)~5@T;|3G2@yN(9W?t%w)Rh66Z0--O+-}6UvcD$_4Xr;O+s(9% zG1A2Q0`skga^99dqQb8$_e>{65X;z(ok8}*k~pV8j^DAbb0}8$-*Y}B!!sp#JV(R= zv09u0<0d^>TPlNIZf4J5ug!&V>sWPtE^>)7u(`x3;tQxFK^TOa&gVtO>gro%q-1wd ze=Z^b5L^SMKsCB|r{$aLeT&h=8Es(?SxKez-_#d7Kg68^+G}=d)>(2b^2m&Jc{k1S z$Kf9LR1xK*@*Ebx81EZ0AdG^S&H={oK+3VRcGZ#>5U8I90oTj)8DuXf#N|`Y5BOLQ z-n^Hk0k`>n)Kt%Kmz0T+$9aL1Ll_zcKFyyD3c~fQKZR*OcKyM~E&_ww*lb0ps0YX~xtR=v4UkbI-Rgc?qC{;EDmcRuow6 zbJ&Pf z)r~RBfvg5F6@d!6#mWD)5D{fjS0(D*`}Uc0M@=Jq7S1{u-B8{IUEJOP zO7*Fmg?qpu2D$E_q@=FVxSI9vkmn~Z0j=|2$|Hz-Krc#YZ5iZ=Ta+F1>NE&&q3zcsfQ3)n8!ZbYK$>P1e9T@i|^BD00df@DnHi z@x$KVchmhw>g(>Bj43MM4})0(UAgFG!H9kz2TkS_tw3~aBG)+@Fh!pKnW6zp+#W3* zqK@KuYw718h^W;K3|bQ#hF~5M;d@GG#xgIBHV6O_BL~v$U!MgKvh%Oc_YrMi{2BV? 
zHD!)Akd;BMBhtz_An~=ynGn7NJyHRO056*qi4=V6^3XTH z%{IR2_@>y^k-1BE{Jm>a-bXo?niVe-y>D0uBeo4Tdu~PD0#(sk&G%88JPt8W3t7Yu zhp&L|JSLQ1jUcC42t-!l$+Yx)TCZRcwq(_|aRINIr?}A0$({1zGd|v8M+-Rly9xvN zAHLrAd3F(PHfylMt5U2+h zERQnBFT{EOjuexKLFiR120-eD_yvq&>4!H@m%--(0&=^fcRH%+m-@TguEeCT9=hbS@-r0 zEQ#{k3`91eXQ%_>klbUAueH9eVR5zch~o=yS47Y4CFi+qK;|)gi&BoP0hs=Mvd-vfTmBBbVcT&(oferI(xIAOL zqfa-hzCNy_wNcF778etXoh~}7iq91V(Ld~^H-DMf*!q#pN`Z$5R^~-aTZD|qbFHFr z!%h(AH_32a7&S8MhS?ilEwu16a*l3K$c#~jWt{zc0Equx5Ym?F(=~Jo(sgL6b4ZA0 z!02$aIVInMO@fqpM;tvZ{jzL015@_7whe~Yar+JT$Bb2gKF?{`7$N3$XHZY?+h=Q0 z&K5@fFk2Lt@N-sTJ>Cw@ZLJ5%iE5sX<67$EP=E$ZW=kX%$|}`BZ*DM5Uk5dTCyi7% zsCXBN6kwRpZ(VtM5A z>y)$bs+VCm6f~DgTbU?+2}Se5od%vAAF%<{kw04yuZFn{iek8eQw&TL*vGcLQOLPn zx0QjAkzXV#!|7mD0H%ObR806eNM%*$^F*x;qGExS77%~Dgql(#^kH}9+!-&mJq@W2 zZUmxu^5h8zH#h3PGFwo9Y|3^=v`jzPm8dK@1j}N<>sEqsxk|REMj%AoNUBLB7QBpo zi%rMX3^5mRU)p66xx}!XK1m@@48oWH)!UaxCB3!(qL^f+mUYZ>N-OJ>rpd{9DATOW zu&k^cFijfFFwF@B;bhs#(mIw(C=JIfE2SdE386I2L`*Ho3D8tBRGbhJ(YvYhzQ6aq z>%D8;d)M#Y??0}k$lm+ApZ)A-_&lGdtLr8vv;&ZN%#IEqzMqO%dG{Cp_(TP;ChadA z)CN1;up40pVg$q=RcO{0>u+B>flK;j=@9j!>Mr(&jR44mtDkIkYhJJ-a2vp7iOK;l zaF%62-OKRCGEiQjE&E4ODC%?$y~&OyC!zr#WSr*MpW0S^-UOLJn$?!*`=p>D!1(9x zoZ9PsaItpTh3E7j6lyXr#{11&$d)>XddgKtz=;uE|5OC5;Kcs*mu`UC?2pGgfKj>S zystSB_iIgvlVGF4sXwuH{f%4qc*}re^5e(=mP@6P+3HmE|Av;q@?eHG+(5-Sg9_WV zCdyz8EuI)+=@3j=lPFsd35uP^_ftwv_-*0NeSt1Yi*S@w0XLexDz65ZLtw|p%X^{k zd=O?1ru#B401`61(#`RzTzuJ806fs0|7Y2x0qXEW)U@H4x%IL#9(%4YH^3zSi6$|T zH+MHThA*ist!3*$8i^KyeQ@%yGN#u|9b0Dw;ly(kHR4V)fN^mqTCJS$7ChKDTjo-H zzmU|03~Dr)uQUzq0D$>)iv5k1f5oi4MPh@>5`YgC6E}Ez_F<4k0mM@rUize*HAtsI5z}iqxKfQehRIs29X<+cJX!$$I_ODAYLTUsuC@7 zkuJ-K4ZPk=PPs4qfNP2sQDwWXK8v)iu~tjxvWCJG@FUoZ9i*3kHP~vRmvwn>FpySw z_$fU%=kKdQ0BN-M9WeQr%I(gY06a;IX21@)U>gSG2uHCO9vuZ}38`DIhI9KLUDs4D zW8~Y#Hp9rr%}Z_&J5jhPNKdUhWqo&+Im#0OK>9%; z0A4Bi5-;nR7?%zMR?u0U)XN+6t|kxMJUqE!s>`Hhf#e8la#l3-?!B3%M6FIYi=?GP zZ@~|>D{KI*$`3N94P&@J0NDZS;bh$f&rd+F!uIaimG7s=Pe`Kk*Kn?VTqgjX23} zJxzdk0o-8NzIO24OMwc{`;+o4*#_v3z{to*5gwrMPf|KNUTDk>h<0R66i#*_Mg}ov zpL&sq5Th;YHgoLp6ZcLjgW9c^=T`l=@NX)q&3%#lo6LcLPdhj3zWkqv017Fuv}^m6 z|9{hCRkV%%kF4;al!rrzc1DIH*t6azn6!wdF~7j38j?IuH{#2`=Y~vP8e9FkrNt@9Y1d8dPs^e#k0>i`bwM2W$83~$T!oTUUfmKP zme~MGHIV!*kJDY_AS9e*)PVBrgy+$`?^%>(&6ws%>PbsE#yvLzbw4YH@eVydw3>St zdWWG%t4ZMBFG8#F{(c+TUejcj2oqD5h)zn4ts#5oSMd%MT#K zcam)Y_EAUFUb1p)tNDu%R6S%~qHPk#30mHNm=5LFa_f4+QC<$hWB$|DG?3#FKX2J& z0vd@!w{0GC61*oIsZ+7Tq>42DWJM_%QkW`8*ghMYm4k3@rEhaKF|(@1D=)@rM=g!1 z26u z5mFI9f)aN7n0~tgM{lNR(tJ!Qd<+)}+A)jk4lDeYXHz3Q#wKFeoTMS*NqEKJk@D^?$b{>5k` zk2Q6iuZ%f|_WCwtQ6gzo!64=43@XkKnTY%{F4>zx0Bckp@K97u2R%-)5SRAR?kjGD zJXJmIfZ_(7qkxk`C$;|-Sabk z`hPR6{2KPzTcn;dsJl5wZeBm60#^FdAD zBQ=Rm;2!04wBchTCFzJE49TmO=i^PNCFji!EJ{F;vFU3&g__vv> z@!ioQ+~eSs>3zSATLk(Qk0@0619@oGeSb`gxMW-E(umVZfGfAI{;SIf&R{H)z|Z8W zczuz7z*NQ0+D4Ii0g)}8AKU9WU{dqS*>MU=t6@mdDcC*BX}f9MvJVipMzjLMoZsM< z5m$wah0bZ^YSyNfQN)TjXA`Z`7wtc1Cbv;|6F(9}r-8ncI)9}{vex0HGhsJGUpk~d zJjd0hoz&y#4njl~H&Sa%w9Dia*o)CY+^N%vJ|^nAHMb(AK2N|zTuKUyGV**zaPu8t zC$59{rNiX4Xm1NRimvR*QO%#b_!_j}yhHS=s!?IR=rQ;+WgN*=oF}em zVgr-wrxA9jFAtD&I5%P`$^W#!zAYJd?x!cm$v(WD^jZx~0IVCR{5KdxSmz%sHS|24 zQ(nh$J}NGnx(9f8fiS?p;CDOa)!V_U!MQd&O&X|Jbi}wIcixIu-m@azik2doIoefigp3pZsuI5`AWzLY|h9>!eloZ)$m;HzGZVXw8Y`ZWdRH<#-{sn`s1Ud%73xM~tE zg&H9|7D6TA&Y-xYqtQ2nBF1gXgVJj0ek|X+kRguw*ez-z0j?@DcqLS3u%v8G?jew}M7-2v$r$v*uEHWQ4oDFOi1fk|Nel4$Q!;`1)j^!F zG&y5aVXF7Xuv}&y3YBHYY(8Qq;8Z<2gqIV=r~N;zA&`tT0@Z(0^|wc7J=AW+t|Gte z;MN&0PSW|T5(w$^4c7k3YPTM({DpkwuMI73Z&lbJZP=rc=Qsl z?Q=hW9elxK_4HVdqRGL(FZU^^j8;pBx=y{%i$tX^aHd}31q^KlBC`~TOm?T-b$29g 
zIS8}%ew-OuAA2vUv8?#cxhtOT*0e%a+cwo|>2M>byvOuv{Gf}(!7r;}e2aeMxDDg}1u#hSMp*TuphE~I3YAGPVAMR#jXBi&KJ}jTH`_yM zhD_ap;Rsp9GAMh|KgUo3W01j0yulc2J)9ta1aZob!Mim3mmdFp1-$V%Kl$>TsPKmo zIUT-&LrLCN4oXlJZ7F5Ytq_sF8#}umwkQb!6=~RAjPrI$f0wyPV#lw~UdLaUPwMdh z{nq&1h2QMq2E!w6VjKkQCn&XwMU}$zzveCaj%|?#1*D)4(24l$sh{_N9LS9Zkek&% z<;G)TOVnCx9$WW~1=Y3z?e;l3xE0`)Ok_IN?M|_`V@7XkJ4=*CsHt}e(^y7P|4D(= z-HbMzd6~62DU8UBzc-6eOxFv@DZ9m|hmUC)ei^Cv+Rzo9^IxMD5CN<^ zk~K((Kz|=eeO51@2HZuDlUMZY00T{kc`mjGP2r89e4DJU+08>g4x_7jAtM_CG*M>f zj4|jo?lEzVKlk?aqJ)Um$^R&Glps-A>QxZKZ+SCdfpjBWH9gSZo8G20Yp=)+In~3% zG-6|tgRuRj)g-ofg}?O*fb;8Vv6pKKyFED$n)H^q2h=b{apW{OA*(7WpaYE4-g4S_ z(;l7Yj|cL>qPI88MxGXBsDKx(S0csT>71bq=C&1B4qvvSeI=-e?7Jc2V!1|RlRvJ) zq_FkKX>1Xi-;6Qu6IBcn9&S6sk{lE#HDX;&JzTP7Zzob$bL&;Yd6L*|?yrzFyfPIx zDK;;o1>0F^EASLc&ZwsAMB}Wh&p*@VzeLqc@y?{x6_mQh0j;ly0a_nx2ekey#>)v3 z`GTp^HeM7lJk}J2T@z&=WifUV+<$jfNiFGg>N%NBl_gaY!Fnw-Pg+#sv)GT@gpXxp zpdB8OcwgjrF5ajhJt3**)3Nt+%j`9Bv;sATC*;_WE`>#7t{Fwm%oc%(x6u0PFMqDl~QFF30wbM4W zaFcez>Nk^OSf5$#EgcIDKQGIuqpy&aysFqfOHm_XsG+49kKh|8S zTr-^{y?7N|9e)v?5Vh+SSInq)tt%^#)P3wL@wo;)ZZ%i90q)YJQ0 z4+xr-knd}oRLTtPO>(ZGb<0L%)mtk-bg3fqe3}Vd?l9)Y>wN)AgUoJm!I5KI=#(wW z$w2S?zBU9lrXH0YbIuc{)Sf;2B5_mTYPbv5iw9cmr-P-mO?4-gP{^Op|H^#*Z!$rD zn!o>U8UJ^E^^69*@*QZ#b3HxIS#g~oF+Zt)Wx&)btEnX&_}3Nboikq#1X#5y{!e6q8jOARAZxOK)NwHwXxdG`(JGsstp>qPa)%W|4uIVl_$E7A21AG# z5;SjZ-NbCDoR5QJ>)7d%;`yopVY)A(l+<$y1*-auttdvQX;4zzz}9{oaH@>d0OcFN z)8I6~*4l4>MUx*JD zj#oiW7_vnC1I^8As*L>z{x!AZ#y5-yU4^P8rj#y7;8k?eD=0W!!ia6*DcpYiYEwbt zd6v5vV-&UPMnxL|QDw7^b>-Cj{D%S`a%uJR!lL?1h2V;mV?MS=h+@evL(FtfZ5UhY zyC`shp}L_)cRF?HM#!ktW;O$2KxhQ6@C(*o!7hIhy4<_hs360pmqh*HKYt(8yETvD zSXW?kz^h;I6l4=sn^jiITp>M|#5D~Tg3u7Hu}y;hD7R;%rUiPCM49Vzg_=Dyr`Kif z(RJFbAesoUMkgUch!w`uY5`t29|*98W3l%Z?@~2}(>@Xcy44N)N0Pf7{Pi~* z18+L|jObJQ)1qwNubnHGgZ8)*_Rf0d?enZ-At5(xn9@x&g9L+Jng>RAPvbO~Jfj?c zO%cYs7|1?64cmXk1|ffIDTN!&Sj?xxA(L#2=J^Ve)?2Q<>MZDTxc7$R6C(O@MXGQU z>r_&YHFN1kb741*r7IqLurSv6aWV*^tV(Eg@Q2O5c4F6X6|Zhfue0}x$AG%LwEf#> z^+1E(Iewk=ezZaLImUI<_lgXE;M459fmLY9D&u8AaHqFy^29{IT1^%`_~+eoGi%u;p};WaN+Ae*s?7sxG)qm z9L2w?UxfI;t#$A}gW^MiVd{wnEOcH>n&S}w4Y=S^BIzwGLL(v>wQ3;TvDWw9D1|b( zYXB7f;5XfT3k$ZRt&~-NZ_upLa~IV5I^g9ut9>-$U4|#nM%Nxby@XYe71K~8H&FcI zvjcZ|uW_Y&Zgcp2=cOBs3tLGx=_1x7Yos ztQ2$~)j281ORP$HE44)I_V(Y29vs)QM2HHaj=AbvK-`PNc#RjPx*mqix^3vu>27%~ z{FGV|f?7kM&F;IdQul@I03u&A!1M(7B4a85gjIGSoYHY&j?6D09pzEpy+rt$ZV`}1 zl$qx&3J=MNz>Zt4%kIw&_@y8a{bAxxKj~;v3L2$;$zIHl{G>g#bqmctsZp%OlB$$0 z<Kc7sa~e0f)ku~cq%04pH3xIw0p^bV zG51OF_uIzL2-!_c=eNwBEHxh_~rr~II@jw&cwHd1;t~fA$N#RvY>FM zrF<-GOb0N9Z~vCu&K|9o_6~}G84)vTV)T8br^ni9;J~|{5j|x30ci2ic}{uuxKEkH zKg6l}=u;^Kt;$zRuczK$?AM%3IL*3Cw3KDZIxkThhZ;ELccGR&dX1pVoqi61+KYdk zR%pzJ-FFxKBB*EOhg`p4ah{_v;@2L{cqtZszAWC+HWG4% z;3KQP{$0ho(?N=az#D6%+uv1T)$vCp@mlf<{rL{|<=Cl{piJfSqta@ip|j`MxlT92 zQ#?IF$y^^vg1}OkYyKz5aSerm82XfnvZxdX5=je=nTw7YHYsxwpiy4YiXo(udN{a| zU6)#(b(NYH_8@*^LFGkdX(<*MceQsx^5iCO7!UWZw;POc@LhbO&mcx_bZ=lsGn7-F z@nG2lU>_SofDUTiqtrnoC``ZxL>s;?8boG?T)@~1l}&W=SHNjIh&19CLN9Au5-n6* zXQw@VT-cq~lI9DhqMg!-Svvl&!Q#Kcp~?vHf0rHn)5iX{;Pd~dubwxdZpa#9bmk90 Pz_Z(PpGUEK$nXCH@Ralg literal 0 HcmV?d00001 diff --git a/docs/official-docker-image.md b/docs/official-docker-image.md index a01a91a..e3362cb 100644 --- a/docs/official-docker-image.md +++ b/docs/official-docker-image.md @@ -1,5 +1,18 @@ # official docker image +## ymir1.3.0 + +support ymir1.1.0/1.2.0/1.3.0/2.0.0 + +``` +youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-tmi +youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-v6.2-cu111-tmi +youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-modelstore 
+youdaoyzbx/ymir-executor:ymir1.3.0-mmdet-cu111-tmi +``` + +## ymir1.1.0 + - [yolov4](https://github.com/modelai/ymir-executor-fork#det-yolov4-training) ``` From 0f5e8437d9648ed0a595695a452126a4b6020cea Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 21 Oct 2022 09:47:40 +0800 Subject: [PATCH 152/204] update dockerfile --- det-yolov5-tmi/ymir/docker/cuda102.dockerfile | 4 ++-- det-yolov5-tmi/ymir/docker/cuda111.dockerfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile index d50072d..e972962 100644 --- a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile +++ b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile @@ -11,6 +11,7 @@ ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" ENV LANG=C.UTF-8 ENV YMIR_VERSION=${YMIR} +ENV YOLOV5_CONFIG_DIR='/app/data' # Install linux package RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ @@ -29,8 +30,7 @@ RUN mkdir /img-man && mv /app/ymir/img-man/*-template.yaml /img-man/ \ # Download pretrained weight and font file RUN cd /app && bash data/scripts/download_weights.sh \ - && mkdir -p /root/.config/Ultralytics \ - && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf + && wget https://ultralytics.com/assets/Arial.ttf -O ${YOLOV5_CONFIG_DIR}/Arial.ttf # Make PYTHONPATH find local package ENV PYTHONPATH=. diff --git a/det-yolov5-tmi/ymir/docker/cuda111.dockerfile b/det-yolov5-tmi/ymir/docker/cuda111.dockerfile index f7d8538..a3108ca 100644 --- a/det-yolov5-tmi/ymir/docker/cuda111.dockerfile +++ b/det-yolov5-tmi/ymir/docker/cuda111.dockerfile @@ -13,6 +13,7 @@ ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" ENV LANG=C.UTF-8 ENV YMIR_VERSION=$YMIR +ENV YOLOV5_CONFIG_DIR='/app/data' # Install linux package RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ @@ -32,8 +33,7 @@ RUN mkdir /img-man && mv /app/ymir/img-man/*-template.yaml /img-man/ # Download pretrained weight and font file RUN cd /app && bash data/scripts/download_weights.sh \ - && mkdir -p /root/.config/Ultralytics \ - && wget https://ultralytics.com/assets/Arial.ttf -O /root/.config/Ultralytics/Arial.ttf + && wget https://ultralytics.com/assets/Arial.ttf -O ${YOLOV5_CONFIG_DIR}/Arial.ttf # Make PYTHONPATH find local package ENV PYTHONPATH=. 
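The `YOLOV5_CONFIG_DIR` change above replaces the old `/root/.config/Ultralytics` workaround: the font is downloaded once into a path the runtime will actually look in. A minimal sketch of the resolution order this relies on — the helper name here is illustrative, yolov5's actual lookup lives in its utils — is:

```python
import os
from pathlib import Path


def resolve_config_dir(env_var: str = 'YOLOV5_CONFIG_DIR') -> Path:
    """Illustrative lookup: prefer the env var, else a per-user fallback."""
    env_path = os.getenv(env_var)
    if env_path:
        return Path(env_path)  # the dockerfiles above set this to /app/data
    return Path.home() / '.config' / 'Ultralytics'  # the old workaround location


# with YOLOV5_CONFIG_DIR=/app/data, Arial.ttf resolves inside the image
print(resolve_config_dir() / 'Arial.ttf')
```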
From 4520d9001edfdd9bfa272b045a78352d29e4a786 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 27 Oct 2022 10:05:48 +0800
Subject: [PATCH 153/204] update ymir2.0.0 docker hyper-parameters doc

---
 det-yolov5-tmi/train.py       |  9 ++++++---
 docs/official-docker-image.md | 35 ++++++++++++++++++++++++++++++++++-
 2 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py
index e8e794a..d1fdd8f 100644
--- a/det-yolov5-tmi/train.py
+++ b/det-yolov5-tmi/train.py
@@ -438,7 +438,7 @@ def lf(x):
                 torch.save(ckpt, last)
                 if best_fitness == fi:
                     torch.save(ckpt, best)
-                    write_ymir_training_result(ymir_cfg, map50=best_fitness, id='best', files=[str(best)])
+                    write_ymir_training_result(ymir_cfg, map50=best_fitness, id='yolov5_best', files=[str(best)])
                 if (not nosave) and (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
                     torch.save(ckpt, w / f'epoch{epoch}.pt')
                     weight_file = str(w / f'epoch{epoch}.pt')
@@ -497,10 +497,13 @@ def lf(x):
 
         if nosave:
             # save best.pt and best.onnx
-            write_ymir_training_result(ymir_cfg, map50=best_fitness, id='best', files=[str(best), str(onnx_file)])
+            write_ymir_training_result(ymir_cfg,
+                                       map50=best_fitness,
+                                       id='yolov5_best',
+                                       files=[str(best), str(onnx_file)])
         else:  # set files = [] to save all files in /out/models
-            write_ymir_training_result(ymir_cfg, map50=best_fitness, id='best', files=[])
+            write_ymir_training_result(ymir_cfg, map50=best_fitness, id='yolov5_best', files=[])
 
     torch.cuda.empty_cache()
     return results
diff --git a/docs/official-docker-image.md b/docs/official-docker-image.md
index e3362cb..9c8fe43 100644
--- a/docs/official-docker-image.md
+++ b/docs/official-docker-image.md
@@ -1,8 +1,41 @@
 # official docker image
 
+update: 2022/10/27
+
+## the hyper-parameters for ymir-executor
+
+| docker images | epochs/iters | model structure | image size | batch_size |
+| - | - | - |
+| yolov5 | epochs | model | img_size | batch_size_per_gpu |
+| mmdetection | max_epochs | config_file | - | samples_per_gpu |
+| yolov4 | max_batches | - | image_height, image_width | batch |
+| yolov7 | epochs | cfg_file | img_size | batch_size_per_gpu |
+| nanodet | epochs | config_file | input_size | batch_size_per_gpu |
+| vidt | epochs | backbone_name | eval_size | batch_size_per_gpu |
+| detectron2 | max_iter | config_file | - | batch_size |
+
+- epochs: such as `epochs` or `max_epochs`, controls how long training runs.
+- iters: such as `max_batches` or `max_iter`, controls how long training runs.
+- ymir_saved_file_patterns: save the files that match any of the patterns. For example, `best.pt, *.yaml` will save `best.pt` and all `*.yaml` files in the `/out/model` directory.
+- export_format: the dataset format provided to the ymir-executor under `/in`; supports `ark:raw` and `voc:raw`
+- args_options/cfg_options: for yolov5, use it to pass extra options such as `--multi-scale --single-cls --optimizer SGD`; see `train.py, parse_opt()` for details.
+  for mmdetection and detectron2, it provides a way to change other hyper-parameters not defined in `/img-man/training-template.yaml`
+
+## ymir2.0.0
+
+2022/10/26: support ymir1.1.0/1.2.0/1.3.0/2.0.0
+
+```
+youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.0-yolov7-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.0-mmdet-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.0-detectron2-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.0-vidt-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.0-yolov4-cu111-tmi # deprecated
+```
+
 ## ymir1.3.0
 
-support ymir1.1.0/1.2.0/1.3.0/2.0.0
+2022/10/10: support ymir1.1.0/1.2.0/1.3.0/2.0.0
 
 ```
 youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-tmi
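The `ymir_saved_file_patterns` bullet in the doc above implies glob-style matching over the output directory. A minimal sketch of that behavior, assuming `fnmatch`-style patterns — the executor's exact matching rules may differ:

```python
from fnmatch import fnmatch
from pathlib import Path


def match_saved_files(out_dir: str, patterns_csv: str = 'best.pt, *.yaml') -> list:
    """Return files in out_dir whose basename matches any comma-separated pattern."""
    patterns = [p.strip() for p in patterns_csv.split(',')]
    return [f for f in Path(out_dir).iterdir()
            if f.is_file() and any(fnmatch(f.name, p) for p in patterns)]


# e.g. match_saved_files('/out/models') keeps best.pt plus every *.yaml file
```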
for mmdetection and detectron2, it provides methods to change other hyper-pameters not defined in `/img-man/training-template.yaml` + +## ymir2.0.0 + +2022/10/26: support ymir1.1.0/1.2.0/1.3.0/2.0.0 + +``` +youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-yolov7-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-mmdet-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-detectron2-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-vidt-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-yolov4-cu111-tmi # deprecated +``` + ## ymir1.3.0 -support ymir1.1.0/1.2.0/1.3.0/2.0.0 +2022/10/10: support ymir1.1.0/1.2.0/1.3.0/2.0.0 ``` youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-tmi From ff580adb643becb910ca1bc4877def1facd0e392 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 1 Nov 2022 09:34:56 +0800 Subject: [PATCH 154/204] update docker file --- det-yolov5-tmi/ymir/docker/cuda102.dockerfile | 2 +- det-yolov5-tmi/ymir/docker/cuda111.dockerfile | 2 +- docs/official-docker-image.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile index e972962..94b5eaf 100644 --- a/det-yolov5-tmi/ymir/docker/cuda102.dockerfile +++ b/det-yolov5-tmi/ymir/docker/cuda102.dockerfile @@ -21,7 +21,7 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ && rm -rf /var/lib/apt/lists/* # install ymir-exc sdk -RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" +RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0" # Copy file from host to docker and install requirements COPY . /app diff --git a/det-yolov5-tmi/ymir/docker/cuda111.dockerfile b/det-yolov5-tmi/ymir/docker/cuda111.dockerfile index a3108ca..be05e87 100644 --- a/det-yolov5-tmi/ymir/docker/cuda111.dockerfile +++ b/det-yolov5-tmi/ymir/docker/cuda111.dockerfile @@ -24,7 +24,7 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ COPY ./requirements.txt /workspace/ # install ymir-exc sdk and requirements -RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.0.0" \ +RUN pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0" \ && pip install -r /workspace/requirements.txt # Copy file from host to docker and install requirements diff --git a/docs/official-docker-image.md b/docs/official-docker-image.md index 9c8fe43..8df75e0 100644 --- a/docs/official-docker-image.md +++ b/docs/official-docker-image.md @@ -16,7 +16,7 @@ update: 2022/10/27 - epochs: such as `epochs` or `max_epochs`, control the time for training. - iters: such as `max_batches` or `max_iter`, control the time for training. -- ymir_saved_file_patterns: save the file match one of the pattern. for example `best.pt, *.yaml` will save `best.pt` and all the `*.yaml` file in `/out/model` directory. +- ymir_saved_file_patterns: save the file match one of the pattern. for example `best.pt, *.yaml` will save `best.pt` and all the `*.yaml` file in `/out/models` directory. - export_format: the dataset format for ymir-executor in `/in`, support `ark:raw` and `voc:raw` - args_options/cfg_options: for yolov5, use it for other options, such as `--multi-scale --single-cls --optimizer SGD` and so on, view `train.py, parse_opt()` for detail. 
for mmdetection and detectron2, it provides methods to change other hyper-pameters not defined in `/img-man/training-template.yaml` From a3959cc3f51274e66f927f14c469e5d3247ca604 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 1 Nov 2022 14:46:37 +0800 Subject: [PATCH 155/204] update doc --- docs/official-docker-image.md | 39 ++++++++++- docs/ymir-dataset-zh-CN.md | 127 ++++++++++++++++++++++++++++++++++ 2 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 docs/ymir-dataset-zh-CN.md diff --git a/docs/official-docker-image.md b/docs/official-docker-image.md index 8df75e0..39363d1 100644 --- a/docs/official-docker-image.md +++ b/docs/official-docker-image.md @@ -1,11 +1,11 @@ # official docker image -update: 2022/10/27 +update: 2022/11/01 ## the hyper-parameters for ymir-executor | docker images | epochs/iters | model structure | image size | batch_size | -| - | - | - | +| - | - | - | - | - | | yolov5 | epochs | model | img_size | batch_size_per_gpu | | mmdetection | max_epochs | config_file | - | samples_per_gpu | | yolov4 | max_batches | - | image_height, image_width | batch | @@ -20,6 +20,39 @@ update: 2022/10/27 - export_format: the dataset format for ymir-executor in `/in`, support `ark:raw` and `voc:raw` - args_options/cfg_options: for yolov5, use it for other options, such as `--multi-scale --single-cls --optimizer SGD` and so on, view `train.py, parse_opt()` for detail. for mmdetection and detectron2, it provides methods to change other hyper-pameters not defined in `/img-man/training-template.yaml` +## docker image format + +youdaoyzbx/ymir-executor:[ymir-version]-[repository]-[cuda version]-[ymir-executor function] + +- ymir-version + - ymir1.1.0 + - ymir1.2.0 + - ymir1.3.0 + - ymir2.0.0 + +- repository + - yolov4 + - yolov5 + - yolov7 + - mmdet + - detectron2 + - vidt + - nanodet + +- cuda version + - cu101: cuda 10.1 + - cu102: cuda 10.2 + - cu111: cuda 11.1 + - cu112: cuda 11.2 + +- ymir-executor function + - t: training + - m: mining + - i: infer + - d: deploy + + + ## ymir2.0.0 2022/10/26: support ymir1.1.0/1.2.0/1.3.0/2.0.0 @@ -30,6 +63,8 @@ youdaoyzbx/ymir-executor:ymir2.0.0-yolov7-cu111-tmi youdaoyzbx/ymir-executor:ymir2.0.0-mmdet-cu111-tmi youdaoyzbx/ymir-executor:ymir2.0.0-detectron2-cu111-tmi youdaoyzbx/ymir-executor:ymir2.0.0-vidt-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-nanodet-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmid # support deploy youdaoyzbx/ymir-executor:ymir2.0.0-yolov4-cu111-tmi # deprecated ``` diff --git a/docs/ymir-dataset-zh-CN.md b/docs/ymir-dataset-zh-CN.md new file mode 100644 index 0000000..8d57d1f --- /dev/null +++ b/docs/ymir-dataset-zh-CN.md @@ -0,0 +1,127 @@ +# ymir-executor 使用说明 + +更新日期: 2022-11-01 + +本文档面向使用或定制[ymir-executor](https://github.com/IndustryEssentials/ymir-executor)的用户 + + +## 外部数据集导入ymir-gui系统 + +- `<1G` 的数据集可以直接`本地导入`,将本地数据集压缩包上传到ymir系统中,数据集具体格式与voc类似,参考[ymir-cmd 准备外部数据](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#421-%E5%87%86%E5%A4%87%E5%A4%96%E9%83%A8%E6%95%B0%E6%8D%AE) + - [sample导入数据集](https://github.com/yzbx/ymir-executor-fork/releases/download/dataset/import_sample_dataset.zip) + + +- `>=1G` 的数据集可以通过`路径导入`,先将数据集复制到ymir工作目录下的子目录`ymir-sharing`,再输入相对路径导入 + + +## ymir系统与ymir-executor镜像的数据传输接口 + +- 参考[ymir 与功能性 docker container 数据传输接口](https://github.com/IndustryEssentials/ymir/blob/master/docs/ymir-cmd-container.md) + + - ymir会将`/in`与`/out`目录挂载到镜像中 + + - 镜像中需要自带`/img-man`目录,辅助ymir系统对镜像类型进行识别,并对超参数页面进行配置 + + - 镜像默认以`bash /usr/bin/start.sh`进行启动 + + - **注意所有 
.tsv 和 .yaml 文件中出现的路径都是绝对路径** + +- [sample /in /out](https://github.com/yzbx/ymir-executor-fork/releases/download/dataset/sample_docker_input.zip) + + ![](images/sample_docker_input.png) + +- [sample /img-man](https://github.com/IndustryEssentials/ymir/tree/master/docker_executor/sample_executor/app) + + - 注意所有的`xxx-template.yaml`只能是一级`key:value`文件 + +### 索引文件 train-index.tsv / val-index.tsv / candidate-index.tsv + +- 每行由`图像的绝对路径` + `制表符` + `标注的绝对路径`构成 + +``` +{image_abs_path 1}\t{annotation_abs_path 1} +{image_abs_path 2}\t{annotation_abs_path 2} +... +``` + +- 注意 `candidate-index.tsv` 中只有 `图像的绝对路径` + +- 图像为常见的jpg, png格式 + +- 默认标注为`txt`格式,其中`class_id, xmin, ymin, xmax, ymax`均为整数, 所有标注格式介绍见[ymir输入镜像的标注格式](./docs/ymir-dataset-zh-CN.md#ymir输入镜像的标注格式) + +``` +class_id, xmin, ymin, xmax, ymax, bbox_quality +``` + + +### 超参数配置文件 config.yaml + +用户可以在超参数页面看到`xxx-template.yaml`的信息,而`config.yaml` 中的信息,是用户更改过后的。 + +- 对于训练任务,`config.yaml`提供training-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 + +- 对于挖掘任务,`config.yaml`提供mining-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 + +- 对于推理任务,`config.yaml`提供infer-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 + +``` +class_names: # ymir默认配置 +- bowl +- cat +- bottle +- cup +- spoon +gpu_id: '0' # ymir默认配置 +pretrained_model_params: [] # ymir训练时可选默认配置 +model_params_path: [] # ymir推理/挖掘时默认配置 +task_id: t0000001000002ebb7f11653630774 # ymir默认配置 +img_size: 640 # 用户自定义配置 +model: yolov5n # 用户自定义配置 +batch_size: 16 # 用户自定义配置 +``` + +### ymir路径配置文件 env.yaml + +存放一些路径信息,以及当前进行的任务信息 + +- 是否进行训练任务: `run_training: true|false` + +- 是否进行推理任务:`run_infer: true|false` + +- 是否进行挖掘任务: `run_mining: true|false` + +``` +input: + annotations_dir: /in/annotations # 标注文件存放目录 + assets_dir: /in/assets # 图像文件存放目录 + candidate_index_file: '' # 挖掘索引文件 + config_file: /in/config.yaml # 超参配置文件 + models_dir: /in/models # 预训练模型存放目录 + root_dir: /in # 输入根目录 + training_index_file: /in/train-index.tsv # 训练索引文件 + val_index_file: /in/val-index.tsv # 验证索引文件 +output: + infer_result_file: /out/infer-result.json # 推理结果文件 + mining_result_file: /out/result.tsv # 挖掘结果文件 + models_dir: /out/models # 训练任务模型权重与信息等存放目录 + monitor_file: /out/monitor.txt # 任务进度文件 + root_dir: /out # 输出根目录 + tensorboard_dir: /out/tensorboard # tensorboard结果文件目录 + training_result_file: /out/models/result.yaml # 训练任务结果文件 +run_infer: false +run_mining: false +run_training: true +task_id: t0000001000002ebb7f11653630774 # 任务id +``` + +## ymir输入镜像的标注格式 + +常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改。 + +### 默认数据格式 +- `export_format=ark:raw`, 标注文件为`xxx.txt` + +### voc 数据格式 + +- `export_format=voc:raw`, 标注文件为`xxx.xml` From cf4cdd73a5b1fd42db65dd3a2360a080f851f256 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 2 Nov 2022 11:20:45 +0800 Subject: [PATCH 156/204] update yolov5 --- det-yolov5-tmi/ymir/mining/data_augment.py | 1 + det-yolov5-tmi/ymir/mining/ymir_mining_cald.py | 11 ++++++++--- det-yolov5-tmi/ymir/start.py | 4 ++-- docs/ymir-dataset-zh-CN.md | 12 +++++++++++- 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/det-yolov5-tmi/ymir/mining/data_augment.py b/det-yolov5-tmi/ymir/mining/data_augment.py index d88a86d..595bfac 100644 --- a/det-yolov5-tmi/ymir/mining/data_augment.py +++ b/det-yolov5-tmi/ymir/mining/data_augment.py @@ -8,6 +8,7 @@ import cv2 import numpy as np from nptyping import NDArray + from ymir.ymir_yolov5 import BBOX, CV_IMAGE diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py index 
a357e59..b2284b8 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py @@ -74,7 +74,10 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True) if RANK in [-1, 0]: - write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=0.3 * idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) + write_ymir_monitor_process(ymir_cfg, + task='mining', + naive_stage_percent=0.3 * idx * batch_size_per_gpu / dataset_size, + stage=YmirStage.TASK) preprocess_image_shape = batch['image'].shape[2:] for inner_idx, det in enumerate(pred): # per image result_per_image = [] @@ -102,13 +105,15 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): pin_memory=ymir_yolov5.pin_memory, drop_last=False) - # cannot sync here!!! dataset_size = len(results) monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader for idx, batch in enumerate(pbar): if idx % monitor_gap == 0 and RANK in [-1, 0]: - write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=0.3 + 0.7 * idx * batch_size_per_gpu / dataset_size, stage=YmirStage.TASK) + write_ymir_monitor_process(ymir_cfg, + task='mining', + naive_stage_percent=0.3 + 0.7 * idx * batch_size_per_gpu / dataset_size, + stage=YmirStage.TASK) batch_consistency = [0.0 for _ in range(len(batch['image_file']))] aug_keys = ['flip', 'cutout', 'rotate', 'resize'] diff --git a/det-yolov5-tmi/ymir/start.py b/det-yolov5-tmi/ymir/start.py index e0fea29..a1daa2f 100644 --- a/det-yolov5-tmi/ymir/start.py +++ b/det-yolov5-tmi/ymir/start.py @@ -73,8 +73,8 @@ def _run_training(cfg: edict) -> None: str(batch_size), '--data', f'{out_dir}/data.yaml', '--project', project, '--cfg', f'models/{model}.yaml', '--name', name, '--weights', weights, '--img-size', str(img_size), '--save-period', - str(save_period), '--device', device, - '--workers', str(num_workers_per_gpu) + str(save_period), '--device', device, '--workers', + str(num_workers_per_gpu) ]) if save_best_only: diff --git a/docs/ymir-dataset-zh-CN.md b/docs/ymir-dataset-zh-CN.md index 8d57d1f..ec9d60d 100644 --- a/docs/ymir-dataset-zh-CN.md +++ b/docs/ymir-dataset-zh-CN.md @@ -119,9 +119,19 @@ task_id: t0000001000002ebb7f11653630774 # 任务id 常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改。 +``` +image format: ['raw', 'lmdb'] +annotation format: ["none", "det-ark", "det-voc", "det-ls-json", "seg-poly", "seg-mask"] +``` + ### 默认数据格式 -- `export_format=ark:raw`, 标注文件为`xxx.txt` + +- `export_format=ark:raw`, 标注文件为`xxx.txt` + +- `export_format=det-ark:raw`, 标注文件为`xxx.txt` ### voc 数据格式 - `export_format=voc:raw`, 标注文件为`xxx.xml` + +- `export_format=det-voc:raw`, 标注文件为`xxx.xml` From 5ccc40207104d0bd9036daeb1511096eec326cb5 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 2 Nov 2022 14:55:56 +0800 Subject: [PATCH 157/204] add dataset convert ad --- README.MD | 2 ++ README_zh-CN.MD | 2 ++ 2 files changed, 4 insertions(+) diff --git a/README.MD b/README.MD index 50ace8d..ee649b4 100644 --- a/README.MD +++ b/README.MD @@ -93,6 +93,8 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . - [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir-executor development SDK. 
+ - [dataset convert](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/dataset_convert.md) + - [ymir-executor-verifer](https://github.com/modelai/ymir-executor-verifier) debug and check your ymir-executor ## how to import pretrained model weights diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 3579823..d1c287d 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -109,6 +109,8 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile - [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir镜像开发辅助库 + - [数据集转换](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/dataset_convert.md) + - [ymir-executor-verifer](https://github.com/modelai/ymir-executor-verifier) 调试与检测 ymir-executor ## 如何导入预训练模型 From 2ff6f57722251ba87fe6a7fe3c64ed9bbb1f29f6 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 4 Nov 2022 12:20:50 +0800 Subject: [PATCH 158/204] add entropy mining --- det-mmdetection-tmi/mining_base.py | 2 +- det-mmdetection-tmi/start.py | 7 +- det-mmdetection-tmi/ymir_infer.py | 2 +- det-mmdetection-tmi/ymir_mining_cald.py | 12 +-- det-mmdetection-tmi/ymir_mining_entropy.py | 87 ++++++++++++++++++++++ 5 files changed, 95 insertions(+), 15 deletions(-) create mode 100644 det-mmdetection-tmi/ymir_mining_entropy.py diff --git a/det-mmdetection-tmi/mining_base.py b/det-mmdetection-tmi/mining_base.py index 27ba2f9..c357a80 100644 --- a/det-mmdetection-tmi/mining_base.py +++ b/det-mmdetection-tmi/mining_base.py @@ -2,7 +2,7 @@ from typing import List import torch -import torch.nn.functional as F +import torch.nn.functional as F # noqa from easydict import EasyDict as edict diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py index 220d373..2911ef1 100644 --- a/det-mmdetection-tmi/start.py +++ b/det-mmdetection-tmi/start.py @@ -39,8 +39,8 @@ def _run_mining(cfg: edict) -> None: gpu_count = len(gpu_id.split(',')) mining_algorithm: str = cfg.param.get('mining_algorithm', 'aldd') - supported_mining_algorithm = ['cald', 'aldd', 'random'] - assert mining_algorithm in supported_mining_algorithm, f'unknown mining_algorithm {mining_algorithm}, not in {supported_mining_algorithm}' + supported_miner = ['cald', 'aldd', 'random', 'entropy'] + assert mining_algorithm in supported_miner, f'unknown mining_algorithm {mining_algorithm}, not in {supported_miner}' if gpu_count <= 1: command = f'python3 ymir_mining_{mining_algorithm}.py' else: @@ -67,7 +67,6 @@ def _run_infer() -> None: cfg = get_merged_config() os.environ.setdefault('YMIR_MODELS_DIR', cfg.ymir.output.models_dir) - os.environ.setdefault('COCO_EVAL_TMP_FILE', os.path.join( - cfg.ymir.output.root_dir, 'eval_tmp.json')) + os.environ.setdefault('COCO_EVAL_TMP_FILE', os.path.join(cfg.ymir.output.root_dir, 'eval_tmp.json')) os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') sys.exit(start(cfg)) diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index bda229e..6d1f1ae 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -12,7 +12,7 @@ from mmdet.utils.util_ymir import get_best_weight_file from tqdm import tqdm from ymir_exc import dataset_reader as dr -from ymir_exc import env, monitor +from ymir_exc import env from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process diff --git a/det-mmdetection-tmi/ymir_mining_cald.py b/det-mmdetection-tmi/ymir_mining_cald.py index 65e6ff4..efb253f 100644 --- 
a/det-mmdetection-tmi/ymir_mining_cald.py +++ b/det-mmdetection-tmi/ymir_mining_cald.py @@ -11,14 +11,12 @@ import numpy as np import torch import torch.distributed as dist -from easydict import EasyDict as edict from mmcv.runner import init_dist from mmdet.apis.test import collect_results_gpu from mmdet.utils.util_ymir import BBOX, CV_IMAGE from nptyping import NDArray from scipy.stats import entropy from tqdm import tqdm -from ymir_exc import monitor from ymir_exc import result_writer as rw from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process from ymir_infer import YmirModel @@ -250,11 +248,7 @@ def split_result(result: NDArray) -> Tuple[BBOX, NDArray, NDArray]: return bboxes, conf, class_id -class YmirMining(YmirModel): - - def __init__(self, cfg: edict): - super().__init__(cfg) - +class CALDMiner(YmirModel): def mining(self): with open(self.cfg.ymir.input.candidate_index_file, 'r') as f: images = [line.strip() for line in f.readlines()] @@ -276,7 +270,7 @@ def mining(self): beta = 1.3 mining_result = [] for idx, asset_path in enumerate(tbar): - if idx % monitor_gap == 0: + if idx % monitor_gap == 0 and RANK in [0, -1]: write_ymir_monitor_process(self.cfg, task='mining', naive_stage_percent=idx / N, stage=YmirStage.TASK) # batch-level sync, avoid 30min time-out error @@ -380,7 +374,7 @@ def main(): init_dist(launcher='pytorch', backend="nccl" if dist.is_nccl_available() else "gloo") cfg = get_merged_config() - miner = YmirMining(cfg) + miner = CALDMiner(cfg) gpu = max(0, LOCAL_RANK) device = torch.device('cuda', gpu) miner.model.to(device) diff --git a/det-mmdetection-tmi/ymir_mining_entropy.py b/det-mmdetection-tmi/ymir_mining_entropy.py new file mode 100644 index 0000000..dc18ee3 --- /dev/null +++ b/det-mmdetection-tmi/ymir_mining_entropy.py @@ -0,0 +1,87 @@ +""" +entropy mining +""" +import os +import sys + +import cv2 +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import init_dist +from mmdet.apis.test import collect_results_gpu +from tqdm import tqdm +from ymir_exc import result_writer as rw +from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process +from ymir_mining_cald import split_result, CALDMiner + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +class EntropyMiner(CALDMiner): + + def mining(self): + with open(self.cfg.ymir.input.candidate_index_file, 'r') as f: + images = [line.strip() for line in f.readlines()] + + max_barrier_times = len(images) // WORLD_SIZE + if RANK == -1: + N = len(images) + tbar = tqdm(images) + else: + images_rank = images[RANK::WORLD_SIZE] + N = len(images_rank) + if RANK == 0: + tbar = tqdm(images_rank) + else: + tbar = images_rank + + monitor_gap = max(1, N // 100) + mining_result = [] + for idx, asset_path in enumerate(tbar): + if idx % monitor_gap == 0 and RANK in [0, -1]: + write_ymir_monitor_process(self.cfg, task='mining', naive_stage_percent=idx / N, stage=YmirStage.TASK) + # batch-level sync, avoid 30min time-out error + if WORLD_SIZE > 1 and idx < max_barrier_times: + dist.barrier() + + img = cv2.imread(asset_path) + # xyxy,conf,cls + result = self.predict(img) + bboxes, conf, _ = split_result(result) + if len(result) == 0: + # no result for the image without augmentation + mining_result.append((asset_path, -10)) + continue + conf = conf.data.cpu().numpy() + mining_result.append((asset_path, -np.sum(conf * 
np.log2(conf)))) + + if WORLD_SIZE > 1: + mining_result = collect_results_gpu(mining_result, len(images)) + + return mining_result + + +def main(): + if LOCAL_RANK != -1: + init_dist(launcher='pytorch', backend="nccl" if dist.is_nccl_available() else "gloo") + + cfg = get_merged_config() + miner = EntropyMiner(cfg) + gpu = max(0, LOCAL_RANK) + device = torch.device('cuda', gpu) + miner.model.to(device) + mining_result = miner.mining() + + if RANK in [0, -1]: + rw.write_mining_result(mining_result=mining_result) + + write_ymir_monitor_process(cfg, task='mining', naive_stage_percent=1, stage=YmirStage.POSTPROCESS) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) From 6619220694d098dd7a8bb32a3dfb09565ca19fff Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 8 Nov 2022 19:37:15 +0800 Subject: [PATCH 159/204] add yolov5 training doc --- .../mmdet/core/evaluation/eval_hooks.py | 35 +++++++++++-------- det-yolov5-tmi/ymir/README.md | 29 +++++++++++++-- 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py index 735049d..cf07e5b 100644 --- a/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py +++ b/det-mmdetection-tmi/mmdet/core/evaluation/eval_hooks.py @@ -16,11 +16,9 @@ def _calc_dynamic_intervals(start_interval, dynamic_interval_list): assert mmcv.is_list_of(dynamic_interval_list, tuple) dynamic_milestones = [0] - dynamic_milestones.extend( - [dynamic_interval[0] for dynamic_interval in dynamic_interval_list]) + dynamic_milestones.extend([dynamic_interval[0] for dynamic_interval in dynamic_interval_list]) dynamic_intervals = [start_interval] - dynamic_intervals.extend( - [dynamic_interval[1] for dynamic_interval in dynamic_interval_list]) + dynamic_intervals.extend([dynamic_interval[1] for dynamic_interval in dynamic_interval_list]) return dynamic_milestones, dynamic_intervals @@ -52,7 +50,10 @@ def after_train_epoch(self, runner): if self.by_epoch: monitor_interval = max(1, runner.max_epochs // 1000) if runner.epoch % monitor_interval == 0: - write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.epoch / runner.max_epochs, stage=YmirStage.TASK) + write_ymir_monitor_process(self.ymir_cfg, + task='training', + naive_stage_percent=runner.epoch / runner.max_epochs, + stage=YmirStage.TASK) super().after_train_epoch(runner) def before_train_iter(self, runner): @@ -63,7 +64,10 @@ def after_train_iter(self, runner): if not self.by_epoch: monitor_interval = max(1, runner.max_iters // 1000) if runner.iter % monitor_interval == 0: - write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.ite / runner.max_iters, stage=YmirStage.TASK) + write_ymir_monitor_process(self.ymir_cfg, + task='training', + naive_stage_percent=runner.iter / runner.max_iters, + stage=YmirStage.TASK) super().after_train_iter(runner) def _do_evaluate(self, runner): @@ -119,7 +123,10 @@ def after_train_epoch(self, runner): if self.by_epoch and runner.rank == 0: monitor_interval = max(1, runner.max_epochs // 1000) if runner.epoch % monitor_interval == 0: - write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.epoch / runner.max_epochs, stage=YmirStage.TASK) + write_ymir_monitor_process(self.ymir_cfg, + task='training', + naive_stage_percent=runner.epoch / runner.max_epochs, + stage=YmirStage.TASK) super().after_train_epoch(runner) def before_train_iter(self, runner): @@ -130,7 +137,10 @@ def 
after_train_iter(self, runner): if not self.by_epoch and runner.rank == 0: monitor_interval = max(1, runner.max_iters // 1000) if runner.iter % monitor_interval == 0: - write_ymir_monitor_process(self.ymir_cfg, task='training', naive_stage_percent=runner.iter / runner.max_iters, stage=YmirStage.TASK) + write_ymir_monitor_process(self.ymir_cfg, + task='training', + naive_stage_percent=runner.iter / runner.max_iters, + stage=YmirStage.TASK) super().after_train_iter(runner) def _do_evaluate(self, runner): @@ -143,8 +153,7 @@ def _do_evaluate(self, runner): if self.broadcast_bn_buffer: model = runner.model for name, module in model.named_modules(): - if isinstance(module, - _BatchNorm) and module.track_running_stats: + if isinstance(module, _BatchNorm) and module.track_running_stats: dist.broadcast(module.running_var, 0) dist.broadcast(module.running_mean, 0) @@ -156,11 +165,7 @@ def _do_evaluate(self, runner): tmpdir = osp.join(runner.work_dir, '.eval_hook') from mmdet.apis import multi_gpu_test - results = multi_gpu_test( - runner.model, - self.dataloader, - tmpdir=tmpdir, - gpu_collect=self.gpu_collect) + results = multi_gpu_test(runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect) if runner.rank == 0: print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md index 1936a93..49839b7 100644 --- a/det-yolov5-tmi/ymir/README.md +++ b/det-yolov5-tmi/ymir/README.md @@ -1,12 +1,35 @@ # yolov5-ymir readme -- [yolov5 readme](./README_yolov5.md) +update 2022/11/08 + +## build your ymir-executor ``` -docker build -t ymir/ymir-executor:ymir1.1.0-cuda102-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda102.dockerfile . +docker build -t your/ymir-executor:ymir2.0.0-cuda102-yolov5-tmi -f cuda102.dockerfile . + +docker build -t your/ymir-executor:ymir2.0.0-cuda111-yolov5-tmi -f cuda111.dockerfile . -docker build -t ymir/ymir-executor:ymir1.1.0-cuda111-yolov5-tmi --build-arg SERVER_MODE=dev --build-arg YMIR=1.1.0 -f cuda111.dockerfile . +docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile . 
``` +## training + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串:str | docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串:str | ymir数据集导出格式 | - | +| model | yolov5s | 字符串:str | yolov5模型,可选yolov5n, yolov5s, yolov5m, yolov5l等 | 建议:速度快选yolov5n, 精度高选yolov5l, yolov5x, 平衡选yolov5s或yolov5m | +| batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - | +| epochs | 100 | 整数:int | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 | +| img_size | 640 | 整数: int | 输入模型的图像分辨率 | - | +| opset | 11 | 整数: int | onnx 导出参数 opset | 建议:一般不需要用到onnx,不必改 | +| args_options | '--exist-ok' | 字符串:str | yolov5命令行参数 | 建议:专业用户可用yolov5所有命令行参数 | +| save_best_only | True | 布尔: bool | 是否只保存最优模型 | 建议:为节省空间设为True即可 | +| save_period | 10 | 整数: int | 保存模型的间隔 | 建议:当save_best_only为False时,可保存 `epoch/save_period` 个中间结果 +| sync_bn | False | 布尔: bool | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 | +| ymir_saved_file_patterns | '' | 字符串: str | 用 `,` 分隔的保存文件模式 | 建议:专业用户当希望过滤保存的文件以节省空间时,可设置配置的正则表达式 | + ## main change log - add `start.py` and `ymir/ymir_yolov5.py` for train/infer/mining From f823f8485bb701251fe16f7b7d5c7c25f35f53a7 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Thu, 10 Nov 2022 15:51:09 +0800 Subject: [PATCH 160/204] update doc --- README.MD | 4 + README_zh-CN.MD | 4 + det-yolov5-tmi/ymir/README.md | 36 ++++++- docs/README.MD | 41 ++++++++ docs/docker-image-debug.md | 139 ++++++++++++++++++++++++++++ docs/failed_tensorboard_task_id.png | Bin 0 -> 40000 bytes docs/failed_training_task.png | Bin 0 -> 75693 bytes docs/hyper-parameter.md | 101 ++++++++++++++++++++ docs/ymir-executor-version.md | 6 +- 9 files changed, 326 insertions(+), 5 deletions(-) create mode 100644 docs/README.MD create mode 100644 docs/docker-image-debug.md create mode 100644 docs/failed_tensorboard_task_id.png create mode 100644 docs/failed_training_task.png create mode 100644 docs/hyper-parameter.md diff --git a/README.MD b/README.MD index ee649b4..2566d9b 100644 --- a/README.MD +++ b/README.MD @@ -2,10 +2,14 @@ - [ymir](https://github.com/IndustryEssentials/ymir) + - [bilibili: video tutorial](https://b23.tv/KS5b5oF) + - [wiki](https://github.com/modelai/ymir-executor-fork/wiki) - [ymir executor](./docs/official-docker-image.md) + - [user survey](https://www.wjx.cn/vm/eKFm2aq.aspx#) + - [ymir mining algorithm](./docs/mining-images-overview.md) ## overview diff --git a/README_zh-CN.MD b/README_zh-CN.MD index d1c287d..7c23027 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -2,10 +2,14 @@ - [ymir](https://github.com/IndustryEssentials/ymir) + - [bilibili 视频教程](https://b23.tv/KS5b5oF) + - [说明文档](https://github.com/modelai/ymir-executor-fork/wiki) - [ymir镜像](./docs/official-docker-image.md) + - [用户调查](https://www.wjx.cn/vm/eKFm2aq.aspx#) + - [ymir 挖掘算法](./docs/mining-images-overview.md) ## 比较 diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md index 49839b7..3a2a7bb 100644 --- a/det-yolov5-tmi/ymir/README.md +++ b/det-yolov5-tmi/ymir/README.md @@ -11,13 +11,27 @@ docker build -t your/ymir-executor:ymir2.0.0-cuda111-yolov5-tmi -f cuda111.docke docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile . 
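# optional sanity check after building (hedged example -- substitute the tag
# you actually built): ymir recognises an executor by the template files under
# /img-man, so confirm they were copied into the image:
docker run --rm your/ymir-executor:ymir2.0.0-cuda102-yolov5-tmi ls /img-man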
``` -## training +## 训练: training + +### 性能表现 + +### 训练参数说明 + +- 一些参数由ymir后台生成,如 `gpu_id`, `class_names` 等参数 + - `gpu_id`: + - `task_id`: + - `model_params_path`: + - `class_names`: + +- 一些参数由ymir后台进行处理,如 `shm_size`, `export_format`, 其中 `shm_size` 影响到docker镜像所能使用的共享内存,若过小会导致 `out of memory` 等错误。 `export_format` 会决定docker镜像中所看到数据的格式 + + | 超参数 | 默认值 | 类型 | 说明 | 建议 | | - | - | - | - | - | | hyper-parameter | default value | type | note | advice | -| shm_size | 128G | 字符串:str | docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | -| export_format | ark:raw | 字符串:str | ymir数据集导出格式 | - | +| shm_size | 128G | 字符串:str | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串:str | 受ymir后台处理,ymir数据集导出格式 | - | | model | yolov5s | 字符串:str | yolov5模型,可选yolov5n, yolov5s, yolov5m, yolov5l等 | 建议:速度快选yolov5n, 精度高选yolov5l, yolov5x, 平衡选yolov5s或yolov5m | | batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | | num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - | @@ -30,6 +44,22 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile . | sync_bn | False | 布尔: bool | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 | | ymir_saved_file_patterns | '' | 字符串: str | 用 `,` 分隔的保存文件模式 | 建议:专业用户当希望过滤保存的文件以节省空间时,可设置配置的正则表达式 | +### 训练结果文件示例 +``` + +``` + +## 推理: infer + +### 推理参数说明 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | + + +### 推理结果文件示例 + ## main change log - add `start.py` and `ymir/ymir_yolov5.py` for train/infer/mining diff --git a/docs/README.MD b/docs/README.MD new file mode 100644 index 0000000..8c664a0 --- /dev/null +++ b/docs/README.MD @@ -0,0 +1,41 @@ +# ymir-executor 文档 + +## 下载使用 + +- [已有的镜像](./official-docker-image.md) + +- [挖掘算法评测](./mining-images-overview.md) + +## 从零定制流程 + +1. [制作一个简单的镜像](../det-demo-tmi/README.md) + +2. [了解ymir接口与数据结构](./ymir-dataset-zh-CN.md) + +## 基于已有镜像进行定制流程 + +- [增/删/改: 默认超参数](./hyper-parameter.md) + +## 镜像调试流程 + +- [交互式调试](./docker-image-debug.md) + +- [通过ymir-executor-verifier进行镜像校验](https://github.com/modelai/ymir-executor-verifier) + +## 其它 + +- [ymir镜像开发SDK](https://github.com/modelai/ymir-executor-sdk) + + - [读取配置与数据](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/read.md) + + - [写进度与结果文件](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/write.md) + + - [数据集格式转换](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/dataset_convert.md) + +- [ymir镜像调试工具](https://github.com/modelai/ymir-executor-verifier) + + - 样例数据下载 + + - 交互式调试 + +- [ymir版本与接口兼容](./ymir-executor-version.md) diff --git a/docs/docker-image-debug.md b/docs/docker-image-debug.md new file mode 100644 index 0000000..d5a488c --- /dev/null +++ b/docs/docker-image-debug.md @@ -0,0 +1,139 @@ +# docker 镜像调试 + +假设所有本地代码放在 $HOME/code 下, 以 `youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi` 为例 + +## 基于 voc dog 数据集进行调试 + +1. 
下载数据集并进行预处理, [参考脚本](https://github.com/modelai/ymir-executor-verifier/blob/main/start.sh) + +``` +set -e + +echo "download voc dog dataset" +wget https://github.com/modelai/ymir-executor-fork/releases/download/dataset/voc_dog_debug_sample.zip -O voc_dog_debug_sample.zip + +echo "unzip voc dog dataset" +unzip voc_dog_debug_sample.zip + +echo "generate candidate-index.tsv for mining and infer" +cd voc_dog/in && cat val-index.tsv | awk '{print $1}' > candidate-index.tsv + +echo "download env.yaml" +wget https://raw.githubusercontent.com/modelai/ymir-executor-verifier/main/tests/configs/env.yaml -O env.yaml + +echo "download demo yolov5 config.yaml" +wget https://raw.githubusercontent.com/modelai/ymir-executor-verifier/main/tests/configs/config.yaml +``` + +得到以下目录,注意根据训练镜像修改 `config.yaml` + +``` +voc_dog +├── in +│   ├── annotations [标注文件夹] +│   │   ├── train [ymir后台不会按照train/val进行文件夹划分,请按train-index.tsv获取训练集] +│   │   └── val [ymir后台不会按照train/val进行文件夹划分,请按val-index.tsv获取测试集] +│   ├── assets [图片文件夹] +│   │   ├── train [ymir后台不会按照train/val进行文件夹划分,请按train-index.tsv获取训练集] +│   │   └── val [ymir后台不会按照train/val进行文件夹划分,请按val-index.tsv获取训练集] +│   ├── candidate-index.tsv +│   ├── config.yaml [ymir后台提供的超参数文件] +│   ├── env.yaml [ymir后台提供的路径文件] +│   ├── models [ymir后台提供的预训练模型存放目录] +│   ├── train-index.tsv +│   └── val-index.tsv +└── out [12 entries exceeds filelimit, not opening dir] + +9 directories, 5 files +``` + +2. 交互式启动镜像并运行 `$HOME/code/start.py` 文件进行调试 + +``` +docker run -it --gpus all --shm-size 128G -v $PWD/voc_dog/in:/in -v $PWD/voc_dog/out:/out -v $HOME/code:/code youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi bash + +cd /code +python start.py +``` + +## 基于 ymir 发起的训练任务进行调试 + +假设某个训练任务失败了, 可通过tensorboard获取对应的 task_id 与 ymir-workspace. + +1. 进入项目的模型管理,跳转到失败任务的训练过程 +![](./failed_training_task.png) + +2. 获取失败任务的 `task_id: t0000001000002dbc2291666595529` 与 ymir后台工作目录 `ymir-workspace: /home/intellif/backup/ymir/ymir-workplace` +![](./failed_tensorboard_task_id.png) + +3. 进行失败任务的目录 +``` +YMIR_WORKDIR=/home/intellif/backup/ymir/ymir-workplace +TASK_ID=t0000001000002dbc2291666595529 +cd $YMIR_WORKDIR/sandbox/work_dir/TaskTypeTraining/$TASK_ID/sub_task/$TASK_ID +ls + +# 将输出 in out task_config.yaml +``` + +4. 其中 in 有以下目录结构: +``` +in +├── annotations +│   └── index.tsv +├── assets -> /home/intellif/backup/ymir/ymir-workplace/sandbox/0001/asset_cache +├── config.yaml +├── env.yaml +├── models +├── prediction +├── predictions +│   └── index.tsv +├── pred-test-index.tsv +├── pred-train-index.tsv +├── pred-val-index.tsv +├── test-index.tsv +├── train-index.tsv +└── val-index.tsv +``` + +5. 其中 out 有以下文件: + +``` +out +├── monitor.txt +├── tensorboard -> /home/intellif/backup/ymir/ymir-workplace/ymir-tensorboard-logs/0001/t0000001000002dbc2291666595529 +└── ymir-executor-out.log +``` + +6. 交互式启动镜像进行调试 + +注:其中`in/assets` 为软链接,为确保在镜像中该软链接有效,需要将 `ymir-workspace` 挂载到镜像中对应位置 + +``` +docker run -it --gpus all --shm-size 128G -v $PWD/in:/in -v $PWD/out:/out -v $YMIR_WORKDIR:$YMIR_WORKDIR -v $HOME/code:/code youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi bash +``` + +## 调试完成后构建新镜像 + +- 准备 `zzz.dockerfile` + +``` +FROM youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi + +# 当$HOME/code目录下的代码文件复制到/app +WORKDIR /app +COPY . /app/ + +# 如果更新了超参数配置文件,复制到/img-man +# COPY ./img-man/*.yaml /img-man/ + +# 如果改变了入口函数, 对应修改 /usr/bin/start.sh的内容 +RUN echo "python3 /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh +``` + +- 进行构建 + +``` +docker build -t youdaoyzbx/ymir-executor:ymir2.0.1-yolov5-cu111-tmi . 
-f zzz.dockerfile
```

diff --git a/docs/failed_tensorboard_task_id.png b/docs/failed_tensorboard_task_id.png
new file mode 100644
index 0000000000000000000000000000000000000000..e34d89950c5b8fa480c7d4efdc0bb718ae6b1374
GIT binary patch
literal 40000
[base85 image payload omitted: tensorboard screenshot showing the failed task id and ymir workspace path, referenced by docs/docker-image-debug.md]

diff --git a/docs/failed_training_task.png b/docs/failed_training_task.png
new file mode 100644
index 0000000000000000000000000000000000000000..861aef72423d3b2fbd1590f3814423af0000f4c1
GIT binary patch
literal 75693
[base85 image payload omitted: screenshot of the failed training task in the project's model management page, referenced by docs/docker-image-debug.md]
ze%fax85@$H-&=cZeY%OcB&=h9*HcD`(w&LJl^fYk;VAvsG%E}EZZ}e-BK zz3doUV#LS)I;0fXxCg?7EDQv;8SRf?vJv?eCHh}A;7_}iu-kwnE^GbXtyM2NhBHKe zny+L;{`|4c*+DbfuYhvplJdR2P8g1%X(<7(3zEN?w$hg*X%IKZ;A3DRzCA!&d+sFkkLI$=0UpZxPeJ z^Mdtw`~+A`?CsS-;R4Pk@M86v0?L9)BBIIA;<4E1;(gqORBn1K167h zl8rI9zgEU*1En1(%tR#`bxTk!!(T*YUhcQDf|{`lydawrJgGOM*K&ZZ<2hegdGAGU z+2vxFimI7CTucT598gzCLUFr0t%-}BboVNT3no1A_rWpnd91RS*t13|znQ8IQ~!%j zXf;IN7hO~Vo6H;jmrq+8Go0|&-a!Q~ZeUfGeD%RgSy##{Pzky!KgXfG?5a1U zXAGo~8!D$dQNfUY=*YWrw@K)dG?s(LQmzGMl@ZWuUvwzC^~HuE6MK?kqwQA>DgnOg z&8;AeuT;W3`Z!m}CK5x2BKHw=7?qN_Qi^As5R0ETA(e_JWX}T>cc|G}Gz=FD5e8}DM-5NDMlE~QWw>u{_5rN(RggdDc}C6VL={H= z{pQauN#;kQFt)K!&;3wxvEA@kQ0Vx;b!aoBNImwc=NZOW_;hxsb_v`eBglXTi^}np{cPr=8aOyX=zyBBPus14b!^$Po`O}9FZJt@ZTTBeF zzW0~iXS=h2`(eEt0r{YUR&Eg!lTr8|8qAHL|Dh?4K>cDA4{nUhw$9gt5P>g<`0HOD z&-Wqih={7-tw#)fAMQRScYCI9&$q$7K|j2T{|qz!Tj=rM+6L}Cz~zThCSP|Gh4Zz+ zdXRrRbD?KI^DVl}952Ge-d*#_KlzN8Fu*!bKQ|(WGOO@}`7YaiyhjO@P-A`CFMW)G z=5yiU?pqD;^TDm54*rb3k|9ltq0zKifs!6!KsmJh*uiG`kcl!vVb5{|lR;l>oE4^) zhZXlY53t)tJsjR$7;B$-Ivp@|5?|G0&aB?(_>*hA9Iv@-g_*GKgJ(Y~cCs)c{@xdL z?2K!fWm2ZGuSvc9L-h%Tmvq$EJ*(xESjS|W)V$eIaI^xW_rNO}S5P$wHhf~A*CErY z^XD(&f!tYWIEUm|z2%G(rN^*(K%KX%5E5>`TmLdN*ZxEA-Uk~`dW~J82=|k{{i)Uo zI(EN=bT|~3Y-N!feH`r{xCNFJhXqvKv)JGC8%D!?`+3cGXnk=wdvDnmICdh#ZE6>Wxt17xNEz3gz zy@cxWC?7*ix(Y5G+YYM`OLA;yCr6wAyOX8RXahXxq=W|Ew;x526g*#3FTOLWr?)xw zczFb>udcnob3Gm+2kZA#NzhltJU;kO`15tbISO>eSUpR9W^k+fZ_cYaQE66{?7`5e z{{Aj;YpXY=iYHZN9UI~&4Ig?HO2CTzOXUs__F}tAc|CQ#J)(lqji-Lul7Tu@aZAV9 zM+s*w^ykm|Jhx98a)re2tsn_i41>HNTQp*mqF`^5>;|Y@lvHL<(iJz?=O-=y9clL( zkPrVwU+1{^e$3q!+=~AtfIWhpz4Gsqxde0^_}3Kwf7)iJS3QS*9nzXbkPglK5t}U? zo~7sSD?anB+4MC`+-nJb$hoSCL9ZdXU)5%r_Q;C)tZ8t{L!0k0T*wQug*W6psggre6mD0ygd7EYIILh=m2G_lPWBs#N^4Q3Sq1o4J0a+d~Dq>nie# zuYzxhl7ZbRco_2NrqoEAD!ZO=rtHmTkacpQDdVumMza~U*vFIFZQ=er!pwzI6z&i$ zC%{TZwqNzzq{LJdeAnVme4mRxc6H6ekoKEW>0-}r|E)mJc$T1W#dGVbqW-9|i%2s? zND|{o{KcXaaj6bmlYHolD*Xdk^YZ+}TA!ze&&MWXWKFhf0f+;h`tD0*Ou-gd%~bXG zfy(wb?Rl~KN1&}7Y`no@%b7cuaWeOtKD#_eRHqIO(00y^UM?tWx}+QYR)$~(P4RQo zsQI$PR43k3!L1sT!Rt3B zA4NH`{;8FM+nX)&5uf3k<(Vmj6eT@#G|p<;T?xB7DQ1s+uhirwd`V9$HvkUl>vi8@ zNUjZD^KFWM|Mn$&QwcMcNPEm+==@l9bI`V=3(z*DkJFDE3M9UZW9|Z!Y*C`gWCr5p z`LiL!IkcBz=(l}h4w}DmM5ds($DsXbTzX0dEQ@!^3TlsMet+5SswGrEvS7OUhbU?v zT0&C*<7a<)_YVYb-|#Jnr_=bUXn(fB55VSo7hmDMuQ>JpyshO3)}bOGm)i*dn)LB; zT*!opW1l3ozWAB7O$5#87u~)MhFk(Hxv5M}DjhD!C6;tu*c-3xc_$+Ot0TNL6r`emu|x;4_W=xrO%w@gN8dk_ zAf=C5h{I~cEh_R!H*KWjN;WIoMgYN#2g3vFdf-Z4-?YgAR6tjM_1WaX+RyI%BF9kw!L(OQ8xVQQBO2fsqI{zXUPrJJ}>+;`L8KJ#Sw*aJ{dhZ&|LY$cLp$s%z^VbJ zsv9OEL~JB-K2X>~oiONF1ZUP(Uf3HRLyt7V-gmuv9Zq(=+kAp=7u~m>rLE(?+pVJ6 z#0hZ2G6fUw-F=>e65_gyzr|h4{V!p+>??@9Zl1c0EKC2efqP_?WQSL9$HpP2tumo; z{`OFGa7|mLk7GfY>)`#H}+=Yjn%qzuwt z?sc+hin``CaujB5h6-+fCf1zkm1gXCsV04opdR(Ops8&%E)JVy6jV3BZ4GugzZxc@==@^lTi&4a#KXQJzYyroo2#u>OlZsph1D1sGRR=Dk z;{V@kqZvHFwd%9&W@LcS`)e|^P$Q%<@0$b3QZOL5&piI|E9UoK{FDj3I{m*J|06a0 z9|CCz=DHvkTxutFUu^eX-EgvvwXMN(zVbxOB>Tc^|tMo`20%APfVQ=-_Wk3ChMV9|-H@@xvv?~(~f;&N{cV>4w#p;(nEiliq?b~}Q z^(=C2+<;;0j@_FKTCI2EJ6WrB@~E30@%*}fAX+MDNhwGC9>;{;ffA`nox|wSl!U^t6cBB+EKN!l)^{SfhN>$4(R%%U9ptnn&3$eUHY1ce8o6bFn z8qo}C*%`b)q*|doH&rW0jz6YFycjb7Pb5MXQ)G_^y`%{R$ajpi;eUElc#2&l44xR*wnZ}B_das@oQKKpqoIV%V6bv(R2R{FA~hnU z_2_O|y0QA3(D0+&%BR9H+sZ+yqA$lh-0LwX`@3&5c;*wq<>MPDqR)$w7GYTKMw6QTGcJ@(Z-&n-xUt(1hyW**7+K771`}GKD@93m7s@uzJLPmV37#d)*x|gj8%J* z_oQ8Qz{PNWgPZoeWSgvYyIeb~|?XZYeBB!$D4`foRgwrawRphG!z#423And%8 zsI<#a*-C1XDEx^TQW?BirkEAA)EiYq4uO$Dq^-_R;xe`T`-y&MW&L?)X2)-AKc^a) z-!KGks10|{6zI{A7E9i;#F@tyV9r$^j^{kj$H$)U5LoB_Yz}YRFzB9K%d6#3t+FST z=Biuo`7fQJ>30TXBe~#o0)S12fal3pwVu&Z#D^S69R<=gvo1w+qJu|N++GK%yo1a! 
zlC3Ui7p7GO-MH?sc74J{CnahqNUbzB*GJD?)I)bbdGxio9s>3O9^_v;ce}I$a(hIH zGAjR9bJXX#`q@VL*E39akxqKB!$rPN72l~c9G0Tmua60m+toTi(Q{evplJFgT#)3D zWf7E;)~lLqTc}~pYy`y^Ze5|2v)&>hhjx+d9Rpe#bHjQ66FL+q=9`hO?+n(&<+KMkyoc*#QMWPRvrncndDK%U+KbrFh<@7B8& zJ?)~4+yOIDH6Gh$@fc7(0AJ${COd6f9)FG`x*0EQRS+G1#PSV9u2mM9xd~I~f|tj_ zKFg5mQ8ClhPtJX_wR7X5=Rq-|@jaVrDTa~nC;#RK>LaTjrf%x%CQbzJbzsIjc@?|T1k zUDU0aRYm-jho-ZO(VPRoKEuV~dgp4R`bhPiF!s^1XPW#Sc9jp!i53&0fmD}~eX4~O zf|EEw-~p9fvgl+9zp}k-k?qZ?4^p*MC*_LpaU&BtL{wr`>A3J2yvqXFqt&YG+S_N6 z&_~PMklEc|6K)fZpf@pfG3J2K=98)>IM+F$srvZ!qLx#wCpavF5Kr;3{K)Mju5F~A zy8*Ukn&UPj93Ik74VtzQQo1mykbPZsRIex+zHox0UNuO6Zr_5p_gdr@AVWt5ulSPf z2yW=?{W+e`yJp9Jb`5P*^!6S<}2Vzn@)!IpT-!@#n8O52>fC>5HlM9&q`}vW^sTsQA zMOvY97xm!jw5YN6K`W68bI_{H4nh>CJqB23*p|V`m!BEa^A{??jX)oHjv-7J0q+FE z{ViY6<<(m(7<4BV+ldx>sOin%EJpG$abL%1n0(lmcUTn~NjhMPnLW++Sk&zinw=Lh zI>ViPM8Q}-US74dVhL^ph+=iL=Nloz>R`4X734n?>qXGJ#NOmu$?-WrTo31C^Z6?= zWA%_Oj`U6jq_M_voMf~V=XndNo534X)$5uav_CLvqaxIElOSW!4l*d52~8emjm#Tz zsT2rK!;d+u?%euE)kKOSb26RkiL_EHUvi(8m_C2LEC-!t?|_9OgFeU~nhEFbVZ68CI_-&P zh&~sF9Vzn*8xRiV`_9BpZ-t~Ni4qLwB++YZsW*Us3_92Hs z8VKhKEy-b**Bn`Q!~zRLokEksxI;93Gg5rTPF077lRkLl+G|8sFOxeuXj~Y^nPXlj zj07&g?evV496Z>nyuW6r20Wn*;-Io%S5;9Qj>0mYn6E!_cP>jyMVR{RijaT;ysgKy z>^ZwfRp(tXhX}M6(>fX9LONuPQffy`KP_&pn4tAe>}WjlVL}&TE0Lo&#w_RAazJ%;1CBl}9R5 zAhh)~fcAmNYXFS+FGPPK6*oG!89H{UXV>eH*cI?f}yyY7GDH@CEhJ#t~7 zMZ3tNVSlyokxiX*GFw{rMbVO`b@rC}OyEj|)f{|Yzafw5zzZv!h3{>!@&|)T)FHm3 z=<4b6*X-mc`bc!xd5eaxQsV;Ff{-D7@mLtzimQ7Nsed#Kaf(!bq?yW@=L>n_yDe=cB)Ae2Y{h z1{|QxNcjrpF#AwdY%;_mnlI!eJvh1S!#I3IB?@PnkTZ!8=&Y7)C~>O1z)MR%{uT0- zb(d%KzI*fVz7J6kI_K-68QT`Jf^SSvTSwCt0^r0ej_XuUk1dWUVpXa_=G|ESwzK|h zS3aga3d5R~C)ROYlm)H7M_~%ok#qOPzZj@Al$Qh1PW!siILMQl1q0SBmCC2K&KYsj zArDTaCKkouIFs>jz8oum92hE5Khd5qg4|}lI7|`NXW8b`H{I2qMb(?&xpMW{(3#&=UvR1 zGVv^8%tEF)h}>zXP`+>2EC`kW7R+^+tw+N$`X!gx#Ga2vPR&PbZt$E-J~4Y{F3jMD ztv>wWFjoL5_pbrTUhEjafzgV^mG{R&Hzd4Nx0iYo8EyZ?g4)s23OiM4fs~;n<9A9}Mehx#1T4;s zx6RSo=@lwuOQ6}r;;9>Xzl7NzeFTVy16kh<_oI^rXCEbJ2{$X0qf?I>%RNZVH}=Y= zZMb3gBmDkKoA|+N{}1ruJMi_t8~?9K34qQ1;k_q=e_%Y{vD2>!kVDG_1po6kp1oss zYVJp!MGO@wdT%LCIsa3#W(j%+Xf!{Az1~va@SLAG4}YtF%QOAVb9eog$4&bwq0{j# zfo1s97+K#EmOtl1%)jLUrGK93w|wKz$qnrdOK{NwXnsKz`KmxYmS*zrj$1Ub2(B$P zM@Wj0vw@ZX$%rKg=*&+nf#vTkipGhC8dR08^u;2%ZFPRSV^6z2e@>Dcej{#xKljgH zACLS7?;Bwdjtm}#OTXd}Cf{)-750kubB)9Dq*?%Cep{BPw;nPWt*5NKRqAO7!YtPP z!UJsozjG6*!O$w2ba%Fd{^F&rEe;ouOI`_(wOYhpEMSNlrTF9N?LC#g-D+Gf^Cai>)`S2VGmV6#!Vj`qB3xgAW z-j|A#lg#6dKki+ykzbsuxi`TOZeR{4T;UxMLLgIOAGZk^y;bYH#TS=M(B!8iYTEQLT1d{iy(D^Cvb1gTtRoPR;N4^bJBf za?h69LBxP=XA*#JQzKAI{NVq95r2UHhxZ{SXI>K~Bd6LI`Cq2@fr|*+S7Osq!iM|wBJTbzW@0d8|y#tPh>c67x z!q(y1AKy@%{Ozr<>^xfvVEe8_*TKGODn9IVeTnXCYhW@{`uNJ6ka^i|f~_oj zz_MzpGmvZxklvqd67j4o&gBEVX1Y9bXe)Xq+%%~vAzuEnI>jiP>UJJM2Xtn-lEjI@*CDlQ`af-V91bw8|IC_)pX=O2#DAaRd5}is z@_qv^UOFl;nC~u=tz)YxkJU1Y`KJ`zoFp`@a+-`1@|5svpI%0;ntT$6%kIBG^S+yd z3IWIz0)V)N%oiHvlzyPee;f;U<{;zMH+H$kH;;)fgiF8G*^71AZUm4pH(_f~QuvNa zXK7ViCrI^g+GpindxiFT&LdZdbE5F)J1dCUuZ8IqDXg}TkS5#3P4v-*Wf3) zytWF$#K|k=)&bO5wVtjBdbrAw!d$-g>@fdvNN<~Gin1fW3!ivh3fV8L>I;G>0(rKj zv{-=Qhc1h-Qo87ONznY(&i>SCFU_!5A^uv;s~UiNd5>nNq%#)+5xT}{+t z;ax^PjnrwcmEhQ#OUYO3Vzhb>zCVz^uby251e%HJ{ewN#UV7F(@{?mO9z9(NOhUPq zXsn_I4&%3Ly{2yhJD!8{g3bx%B6!SQ_m|{&q`)IGo4ui{q9UoyCor7%J2N+7(u-E9 znKY=B0M8n^)UQm4}hojzZrwv-mRJ+%6ysY^ z*WE?RIl>5vi1l}cI-8ObBK=Q)P@9h_`4Y6JeK`I)0&+jrQD0<4AMFC?(C#kjHG8NT zph{}O=3J*wDY_vp4y=vmFt)M8`qm}Xpb{^laKz%vyWJ}&*PxV;4>_nlE2#cGfPE(e zsF~-i%n)u8o%0T$zz|Ib%vrZX<#DgM{w=pZhl$Z**JimwCyG&0iWeO1H>`QN@c!UU z-;b#ZZj}d0rp*rheFpV*vCTSO(WNPLac0`k4B+I&Gud1^Ya5-fs_+Z@VuC!!MoV%@f#h@v<=mzns1GZ(j{`V 
z1}W7dfM`^_;sDr4?TYI%t3iUq|67)F+8uR?yzaXFeqJ1V`+J-4 zN6eVFXIoY4(nAnAc+SbrOrsBeIgoSI%MH%LHGzji@^z;}9uI$*=_Gx)aZ*ukEUxem zpETMP?o53_y^~z5=jQqyAPre&C_xbOi4a+Ul2ZE1^_BzFp0!xF0N;(xtXGdwRT5Z; zsVO&GkR&GLvTJ9?c!SL**&7w0Pv>gOKSDsD>$#tSvJI2y1 ze3vMgu7hO^R2#DALt{WX%>r`l@@}^<$l9khK*>|VoiZs{aghAyoh>RFTjtY&GONun z_t^`COJ;VSD2}`EB~a;NnD(1Q!~u{-{6)K&z~=qr_H(yZ$Q)5Pc(}#h{;a9vn)Qlq z2V&yKYa3%e0~JN9dgFKwo7p;R@daMs+kTW%g))@|tt4s^SXJ1z58MWA_rImSbXcaG z>r_ZdwV&EhHa7V=s6UexL-Kw9%+II}v-W2Zx~-EO_L+stb`G>QciKCuRHf>^L!b-1 zkeMF73`E^n)+#b+HvABSq*-72xzgwfcm~mu92D!*xW^7Kp#jlr9Y7C4(l%FIfAap! zhxGQfwQX@x1;WISrB^FZgy|qBL{JT|-Qo`9c&2X_?>1-zcf>&cx;$kCRyf)MFt^!R z;d8!>8=$i$PYmVis~0jiMArvuUtyQr>E8oa$JVLQJ`~i7)^Dz3>l{dr*%AvTP< zt-*;;jmX+#?P^Nz>Ax>IJ^~9 z6q{mJUl3cQ*sHgoE!It1z+JM(3IjuKJjbA8v`6d^rHC#kUUUq?o4Z8C6ZKZA{1`WGF`t4c( zOyZh6zwV>|8VOTLnA`4yH=`X)ql^Q6!B^B!u&A1DD-+GyGKLey_%P)LvF-vM(*OG> zODNjaeZduUsiJ>n2yZ*ckFtR8?fNq#90Gyj`V63EujetXTnXntasG7?E3+`|LTX)2j)1Glti!*5C^kHna zu)K!jo_75%X}VfVXP^#XEvJovN+`7U@I5B=zWc}OVask|syQ?lsgy0BjWK17?*Pr& z-*Ye9cYZ9e-`Jw*w}aj-4JTdA5U$jMINXM2#^|2#(8*2|?rrey!Ob?SKiwacKEuc$ z#l!b+prRVPqs03QK8r@ym?x-ssl31R;R827PT3^kEn4z9SW!I|9eFt?+B4m_Tm_rS zYwbpRrl@0AH;312uI^{373Pjr7x6^nJqV1xXlkZD>5wZLv_eBTXa>}0^j>fQ%Ajs~ zNwu#IeGF9{V| zQhcwQtw!_gz3^iZA42ai5A0aV!LQ6wJm#Z8>nX|gc}Jos^p*dM<35g2zByI zEWK_G%4$b@R7{owN-;sn9CL36>%HH_{;eIk@;t|lnDE)QsK^3XbhZarh{`O0fI2OW zqlcr*7Tb}w^31IGSf39cf(AN{IOW{!n10p)uIHCk1$pS6S8(InSlB?x9(@Np164$}PP!n4JTsDJ$pSue&MeKs$tLG9+)1|xb!<;`-j11a0Qp4Mn9PoS_FLk+ zlmIGL9OfQ^xh$1H&xO-4K74;rOPZLpN7u z&edIzHcw(xhBoBB+kd|nb;lnLjl@0M7HfSdp;Fc%FF>1H82KXDJ6cSqr%nO9ci)GW z-|dafzsrF`ZLHi3J>o1^O&}%IpE|tG7iswmUkYa0_#N=`48Zd*nhM#CZxyxI1ub;kP% zQpAAN7UZ@asR-ZIzZ7RR;CHp z@QkwkYy8Dbgk`dNqn|zMckjjZ3U1LmDdI4v=X#IE6whom1k`cd`=pB+QD^!pBRi3Z zwG!}W8CunDq40xCi@6Vl<9uuxIF-<>zv57vX2Gto?cI~_-J}dE0G_8AYt3Z6PoBr5 z$45oCuxVHBA{N7+%Sy_xxkR7>`6fLZ!TG@KTH~{SS8jRtMsi`OgN(-S8=APdW|=VUA_1$B^pX0k z9_x15G%7g5zcg?Mj0M65k9gd1(_(J5I4kN;k4s-!^f9|?Td6SonHHM-=!9KbwGl^@+mz>U;?wXpu20e&A z7>#;jl(943VujltbM*b<G)gDb+F5Gwc&PQ84w;`K$*XregO^2bcwIe{*w>K3{3WpU6QT#94m+5VIGYiSlmU!+jL*#T|C zqi|_ye>v&i0VO*56!g~p4X(fb*LY9iy)V7}0ZsB#Ml!ZOqHZYJ9~5i|ly$aO&9J9* zQ^N|A+V7LC!?_)l+&uua!24TEQ@~_r#Nk$+25DsKA5%(GIOr|he~$=mrg=H%hiJndlV<28MrrRP*bZJBnpvsNwC z%UgW*p}`w0jb&p#nPB!@#FjsC-;N3ay-FH1kx*PsOqcu*8|NSc7)U!=~x=y1&nd0sLnh zt3RieT_j|#Rs!yYkiFMep$=EWX0}2Yv8NgK ztIKRNJ+tn#<%(^5HoYNz%q%lVZfyj7e+gVFw+TLoYRqI08r$ZjWeiOPSM~vSMyM;r z{Fxg$-(I=Zh}?q-q10ZxZS>x`CKDy)x66Ncp-#c=GhdzE<_Vv&O9rV8sS7jFLB|Y% z=w8#YF;L+U$X_WMFi7Q2iyS2R@IIDGL#$>%-rdgHj9 zNRx|VN->0}YXr8EgyQB7!TK(F+6YJk1zhkz{yRs(EoW<8@M*;z5;UbKczC8PQV-#C zipnoQmvMob6vwS<$VQiYc~J6nfnDS+j+4z}1lZDJz#59)oDAtoQg92CpA?^nQSOQi zXWVP|aya&3urg6Q^Ma~=07p#)vG0mBh0iklN_LtZ+K29JpJgy{x1yqiz zsY<6Ls=BKm1PV!3&YCQaDOzrIUFD`!q*ClPRM1*lpxGPsqSl178sI5AA{TpKiHS8u zh{q55!Gp<5>axI8F4=I$s*L2zc5bM*@v4|L_=d|9fZQYf9L$@(l%4h?jbyU6u23_; z`wcz-5ia-5WWn$tvkoWO`W*?gK!JCrs9Tiz+JP@Ecw>VFE($4VT!6TgQJZJTcot5w z-eDFX6+?&lN9{YD5|czq9}3e%X~!3Qd-XIqHz);JrzU3H;wP&u)GzDVYQ>+T?G*_P zPwfhmY;L^`I8?wT4ALx!m+H^~lr4Njn}9e5^(xyKXCS2DAiG-6QqjX}v^sBGQ4WcAs?i+f|z zs`a|}C8Fv}7LS&tFGAM&rTfN54A&OUciW>4(e)~x$d2JM z_zy7Mx*RA8-+I#N?1Ux|`Kc@ck$-!jT8#xRv5-M%$Kh%!e+PiOzJPv6FS`ndCg8&> zHL7D>UGyJ$9tA3xB_lZ<@(yqSi3daLa9OxE4O)~$g_2c7`P^F_uf}L%DNy&&EN3KrLlMzeN!^cp<(5cPdaS@(+Z6 zNfz{J48j%X?M19s*xRa#9eVixWQu@Yks3Vq{6X?#5M*gb{g|+r*pnX>GXHBe!2hi- z`2THfOFZlR?s}8&*HJahFJ}HD&_;iW>^L8U_~{0%Z|`UVg?{6cjw0^fXJDZ6 z6eutxbf3dh5usbpQMq0C^QvK=*Chq3hO|XRn=X}Bf2W#1YFg$-1DEYcwW%w(hRI5M zO2IVHr!^Gj`3JE-L~nRuxLc<-8KD>Tfa+OjxD8rlo>9DnU93dKDMnLF9MC#++< 
zz03}Nw|#A9&9xfqD*Cx*zI%7xHkt+Nw13z{CVl({m~q43uLGbMSA=tL>O|4STod4` z6(mqcWT_hZt?K9qA@^#9yq2uJLyA^U9dKV&CvZ_!HtAkMY@cAiC++Po!dRmH-2H?? zaz3^v5c#9c#Ra5=!EiREnO|1P^d9sp0+c}EA1~cyBA?F)j@ZQ*k;N}X?9v#bn;99^ z1~WA2!4!nbwAO7`t?&XQZD;CUAi4WpUSIcyK9F)|RwR1A!$p=r{~` zca=cNrIdzYcJ;V{s!daD@C?8QNrJn-HmjT}?d)6KC@gML@ypBW2KdP{TvVb57KjNS zYq^Q)$K9d3Rlgzc?j!e`Oh_Kdh*{><%wXC`fb-EKqXSy6L{EpfNQ*%*lIDk z9>@DuXt7;Vl==7CPVvAdzUdusyYW7RomEu|B zH%ci$#%t5rpKfC-|DF=Rf8a^{n8ALuwj|~b!K<%SbEarr>;%iO1Ax&{ePMF0NWbed zEaQk7|2=h?ak&RVwu6sqIA%rW_U`W-G$yk&4xanx^S>s>2QEU%;szZw>{#7(GQY@T zxV_pZk}6UC9MrPX~EO4dFcSgbP)`;Z= z0d6&@k&NRKx~shQ815b3QmyuzhO)2XE)6u-13UWUFu_@!eqIr_2J=d)^hC+#6YDh zK+KbkN{j{$9fHSbc6nJ%KbP+YJ~}^L!JJn19o|)Lj|bvOO3NqzktShj*&18Xx(@qV37Z~TRQUCN%{i(@u*hE{)J&@;Zr2q9r!6hM< zJ5wYatKSL(3}ar2d%IFdHn(l27HzEG6*ASf)&#<`OC4PS!8i!0%MKmg1ibE4Z?UpLGhQ2zA&SqO8gzfhE(}!EF$IkXZ4{*C+ zmTfh>s%(UQq1H?RT3D5xomw_OV@cyO7~alud4z>J_W_#l%ZFn%Oz}~rkfB=@He)_p zbt-T6rZWnP;T-;av`Q@hOHxklhuD-zOCh5GP@rCOGhKwQf6l><2_2OOjyX=&n_b$v zkZEKPT8?B{a7vUe>H7$8TS&1PlJn5IJHp_SwCHpc~~V4Q3$ONs=fY zD>!f`K6W%~B#iXH%cRE1HmLgPgqQVsHxVsLd)9VY$#l%&=C|WxDf|N~E4TJ$R>^+9XI7BK=rf%dLk&~txhycG*Kt+4nDwv-*4uM) zj^(EfJg7E(TnypqeF;f~t7+eKsyt(uXHQqe*Qvl()63ApTdLlRj?QAgWpm- zzDK1h20Y6%Y|nyz%bNR9Z;6;w2C~Wy zv%YcQJ3%Pm+-mJjXr}P^;vz4Ow8?(!>AyKpaS;dl31AjJNqqPiz;>k;eR#*ORnB-U zhO+on_N(QEuUrqAN=SC5&3iS#;25*Kp%M$Lt;9drAzk+5>T=u{l1%T&M4W{rK7Qpo z?ho-Bm~C3VP5G_T(WhTZ{+d`vY~4||TFgAuS!~F#t+9k2b3uaAYILKiV##u=4kc-E zunj~v+;dIDGXf339oUa$cq9UE$kX` zgD{AiHdQSqpeRJotmkj*8u)NPFqg545#810qdlz8mmKnR?1PA>45AxV`gKR0v{g7Y zw_FLB>lWb)r8Mto)LX8<*bZ`!V7cpUg``m=SbDhCpy1r3FYxnw#OpFVs%od_P+4KU zWVW2%@@=uRJwJ8;Q+7j91}BvGJz04!6&fsT61R(NaC_rdV5VMU7|)eVtH5Fl6w$Bd zk>hK5YNNjkN}epn!D@F}S78qX>uPccJ;@}GnSg+T;4zm6Nm4O5NOj+ZPN|2VH=23a z9Ku|XmZ^}K#F`syhQ5xCO&ES2o>=vZh?;zr6r+Fd_JjKM47|410^74Bl zEX8%$YswAL_bxCb4C-(1W0{;TD0v0&dFCzhVxX{hreu$%$cgNopaxNkBL0f`HATXl z9&Z`5XJd}a_b$B@w80Vk4p=6i%p(d5V~qLK-$R~;F^sZdnO_nxvIElQ#S4<;@XmNb zwL2ojE<5~Osu6v2-iXgTioR+T?mnkQco!>}5!THIrn@R1p(>EdlmnmTurs^8QLN0* zx2rfvT{4oRH_A?mJ%}hk+aIV99w}2sqTbh`&LgEYTEdgiwcKtoW=2qwagTbs4ph>= z%=qpxL%4tX31jF9qP(lm+5Y{==%{l?Y%Z1!#CdYA28+wuY}xt1?!*>hCUfxXsUitC#^ga9OAirja!tdO-5AKRKG?b zYOEkHbYk$zs%WPv!qQZ3W0Ge*XZELy!j>#*CC9luvF_&^IlldlQ4Xn4RWn?TkGq5u1Df8DB!Mxb``K6 za65@rYVYBacO?OfUO&PGH?uOYuZY{TYNwW~bQfplGtu>CbBK`KXEm}vmr)Cpjd^5( zB7Dt_L4wf{rC@tOMV!wy0%6fk@ zsVJ(q*@K3zTl0zGVF9Qt?!)!jEhP%-hRNriEIbF7MLp^0<2J$77Vh${muJw@;Kh%} z*qOu{DaZDUsH-rd*kHM?Fz?{r15SNQ`$8$i%CfcrtNrHYhvA8YOf2>)TKR}Q zOp`x*KJ!WG!RMLpBv)wyQ6k)wsH_niB76){voBE7SDL8Kvk@STnhG;`1M08;zQT8A ztb>?`iHOCaP?}Fy`e9O+dYxa(Sc+$&*<48A({QczIHt?%o9GMq@r{;G!zPce&V(PN zE}XP?s~&vrXyLXwl@7G~;xMpX!Vn|-dcW){A< zZfiu(7=fLd?=c!vIpKH3n-s29q(3Wpqk;*(pIXkni+=r|-EhrdR;jQ-*D^y>h6^<} zL5p7Fz?dRTX>_);IF5l!8Kvzs?8RWt*{DUjIyAh|Nju*+9RhWoLP&a5{$fz&{J48; zcenA~yov|YkA@`iBdyeJE)BaNY(VG~hGGqx$9>$yCvXKk%_~q7Oy_IydBRGo-c^r;I>env(B; zlHNWizU$J!?@5b(RMP(kjpb;yBD^XCX-hJe8{XH`uO-JggpgKSmw9UL4g;rRt&W$? 
zd-b7POEJ??{M^I>|l0-W!7470L`Nkf5{9nJ7nRVgf5Yj&EMk`mY4yFgi{};DGq1%wEQ> zugq`PH6Hm{5*YqmGA~%6L+M6+kc`MBP4!>=8kSj?DB+S6l2Ptcom<9;og;{C9)ZQb zQeRs=sD9xwf^gqK9ez9lg6(*Si+IE~{}Vp-0woOd2E=!cKj!(_uwPbNKZE}<>&2+g z#xe|!A6|Ym=2>Y~6wb877G_lKsFu?+$^?b^(c1%)<$!8klPPVIy#Sx>HbKc zd_Oo@_%wsBP(GJ3Ss~IVt1s4>g~HQ9ph#QuRk%I6~bm**YVIxU?Y8_`12iQOca@Z#nHUyT9j)Y3b+g z*RXeed=fB61$+?nunVO%`=;Bv!KvZVz-(vvIDwO`!r7q3=;5M`%qvYv@gxs-74MQ- zci}J9_jzEJq-N3NqE&6h1B^g$h&+?w)#|+0)us}*ddyl+$Z0HD|HddLUsiexM~lN z`ktek6y-0f)Fj1X&fDf=)2#SvvAQGSl7>1hYh23)`k)7bli-l$-^zL%+pEWY09#jY=@$^O-ldn_Unh$v>9Qhcji zMBI8$KRG(vwxR7wcx4gh#?EjH0~!5p@(RlOEnZJiH#yu&3j5GXe^o(9Dx9sErCfjz zKM!j;5HxLr`pH#$Lumk<>Ikee=svj@eN~%omzij%`5mo%G3-F8ezm=A)nJ=t+>BiG z#alF0D*;VFf@L9r)vVUD0?Ydp#zoxNJTrDG=!Qbg&)teuQP1~PEy!O`#OsEz+;0ej zc@~t&OtBesj)!{?3Lhw4-prI|9KAj|3NKc_0U;?iP9Wj%2Jn zgTaB%q*F$Juh2N`)Bta256{ZrIQ+p>rv1TG4p;arAwp+qgM+oR*Z!a>i-L7aj)k_M zFck}esDS{0EAvA)S+>y{!nG!BT)>5jpI<%QH=j8F*3@A_TyR}2`J1vh=(c3QuI5LO z5)DTF&li}y7m<+gENOG&OxDFjShE;FC)$p-T1YmRYq(qsnJmy|E|GDR@XU!qa9uX$ z0EU$;%t2dRjzc+-UEuh-V)2btMDKk+jmAt2jX@imLbRFes>e78hSlb48Uyzf6mKKN z3XSgeO^wYTSzy;(==&%YiDN3Ov3qp<`A#;==S>9HXtgVN%1RZ4-^2-kE%tXTU#vde zW7i5ZeAQI#?(0g(ZO84cu+NHqv`;CXvoC}NdDE=A8VE_ntzeu_Vjj7si}JT-FA@fi zr7iZzq5MHR2J4s}``D0Gl-|emiz1f=b=<{i3-iZ}eg_+dbW>C0cJn0X*b_=7V+U3P z!Zda)PvVtSmK<8z5}8X28tHBPgT}M(WDyDowc@oo=pg##b&%4a{*5sR4|FX(kYG`> zZ?)IX9bsd8Do7wPOe0zaCX`zTlr2+ZMM3U!Pw-Gk>9u^G-Vw@!S@(gf`%{vZc~WZ8 zhh^zU^_vcUzYAFw*m<-z;~RjK6P^Ts^Ztw+=qkzJbXE4mwSIIkb}Xo2O%T*j+7x<` zu5!_S{>y!lrauyNNnAMlv^w1IR)q1Q;XEfJMz=?F(jA2C|1tgO$qT5e;d2Uy);6X! z;wnMV)suMrAT|=HS^*MvR-_4CH|w#lqm*d>_&JbrNkC1=F0?I}35Xe8#S8r<+HX&( zoOzhn?Q)xjPd5|FvE0S0w_7l*+vyNBc}Jv)G4U62$MRKw)wKO|FI%ZaBZ14^Cf`=> zU|`|Yun>N|*vkgaqsKV(et&;>lZEwkycKn5py@A&vcI*<_`}b*;hUm&W-AQc*{&Kj z14I7=K0_N9$rqJf<7AZpz$uOhZy8CQR|Y|EPAE@t^Jfqu(a^bxoxaBpQeAu|`5|>j z!1kZw&TCVbLdX)fi3+x<8kr-cDAaz&DoLXEQ+n@ri8kNB-sY!}Pnl7ADKQ5k!^enx zY4q|Y0~4zMUb>)WJcf<>J_6@Ybod+h^UWXiC&BRtA@qNK`2To8fmA)f@zZJ)b^qP~V zkohM@vJ#L7xAgDWE^B zG|Q}{_=-54mH1`Go2*pVlM^I+!Ccd~_`J-LY5$ubHtlJ~(Zqxw+rks$q6-v&)2ekg zypL+M(+H4D8&|A0z&*qE+2#t^secsi0JWSb)O`PX!tg#Ncp5xWR+P>dQ1m40)CzTH z06?MJiLn3^BJn=a+E|(2+4fdal#3&AgCNtmx1mGgZenJm$SpKNN^MyGjQM-Zu(_YK zatZ86ab!x)tndmJoij7tA-98}v&^Cvk+vWmW%%8P_~&=$kmf(%|Nl1{ z3|sI2%cjMZ$iB+Gj<=GZc|`a*+Wz4-)G*Wenss^MOI($dMqGo_a%ON2V~1R>C|#Kc zNIYL@JxgJAs7?KnKmn}$w|8OtOZqvp518DIKk(>Qe#9Kao9PM9AA6<){T7O>=MKvB zOylhMMS>!B|6CEHE5K&b)34vhc%jIWW{%hedMNakXUudj*9c~bIq2mGGgZLDc^tDx zmV9I34>|{)Z7NxTzxU-+ zVOM9SJq?Ki3rz9qGP``>-s^Km7TXX~p5FH;3#sCc_7JWK160Zc=D{XTg8rGyT|Kp3 zM|7V2)7v&X(uYKH%Ef?)Df&pX^5S6FB(sUL)D_lO;H8~Or0YKXT=2PekCz_mwID$P z8l9t=tTCz~?iS)v^YIWlI$-H)oTKCa!r2BpuG2aj_=qCVnfXKSq+( z1IfDV{wqSit;ze*v)$Q9cEIH>tZ$i*o|Rcl&_>~j8j$@ggP!|Phipb7hX#?>oa!*S znVY%kcB7|y)ps_`yIafE3UHz1l*zR^pkwD&|499vSMW z)s4-bd3!@ecwc+H*??D1l62Nacwzsi@UWoQ?kD@lE8^fV5?#HjIfMXKM|OR9r3Tfr zLZj9xI;HK@U+Lt2%~M8c{$3^9b!nH(&=$J-5|7#eBYWiclR3aShL%sxgk+Eb40}D) zPfhwk-^QFD6nD-xbtDk3brQT@)JAkyLjt2|@7`~U5V;QX9}#ztj(8PEq>G(m{5FV} zZKXTw>dN0bIJ|$SgTI7%l^@FkM(mU^lRcMoJ-It(;SnWq9mJ@1CPrA!ZPOFXGGxs3 zdQEMAAv@YB6c_&ntXAUj4ohzK*kL6jLP$?bP0xW@q%()>Oy(w))yAG)Z-aR_$##qK zSjYpOJo#qWFnY1Ngs? 
zL)zuGb1OvKd1>qAwjUfqu+L61ew~%b;Be?&{N$`_K?WEnMVWf}T)Q8>sTM(fVcbW( zwC>?T-@3%|zHKa)&>AN&`buPs&@Q7f%n01OM#pmo80Vdq%eE>TN_{V1|6%Mon9%GA zIh8?6O_p%Ux3FnOnj=^1x`du_o%e>CK+Tg;CNdPyVpId=Jt`KTVm9dkvqlf_QG!v`bYA98&rYGi`mIkk?V6HNnyTVMw>h7D#Fy?rg_xG_JqX z*?@nu@GD+b6~%`bq&TZOe^Z~GlkTc7NXnQJlWsEn$~A| zp{TxXqfdxI!?xlfVd>ArS1Gr)e+hKd9=V-Z3@&NynUKf#v3xekXBTZ9JOR|k9iXwN zU3Bv*!@)>YFMPFoAi#4Ly<5ZR?`%xX2jNzguMsA^!gZBWKMq~2bz+^q^hL+V%+Gj3 zYVUY^3q;MjKpAreGeIIPHjQNf^)n|u=ox&LMu-XpF8~E@@Ft~hCQ#)SiVdP-2+xEh z9j+_6sgTwb-hTSC`&e#92~+l=hSlr0HYz9m$~_f&n3UVVGm0g&{ZJDV=b2u@134pVIC0KKvroeASKlH2Vjo)X9|HhRV^kcD>s-!&>Z}_r`lx*gI`! zyFj*+97;@rdOr)sC*O~v@H%l$JbVjY@$=-TaEZj8GzI-cl`#i<@WWSwev==(ols6Pc}z z6jPap!eZg8sv;QOQ83g}F}Sa-k3wfk&&f##%sK^$w|KCCKt5c8-c4Z0w%C%Hl`yTiJOPDW<&f2YfxKpqCy4 z!v<>^uI&T{Z6F?(n#?sa`0?%L`#nb=_82oKQww_zC)uq@+1uHub^lA*0t#c^hh6lu zt>OT-<)R%vG&C+@hTZqIVuavPnpYm`Wfx}HziXp2uE7)HbP=d^)irzia!Yqoz*G#=lw=+!(*k2xwyX^FU< zOmo@L91*YXe-;>Z?wTGoF;~vgZd(3KpN)o_-iUE}>|E*Rqzxb|P52Fu4H*aD8Xk`h zlA1v66yJJc%Dz%L9=2)7E7)t9gH+=^#(BAWMb{T^dGl9nit!ftwC;I5x~gKAsJ0=X z*oZ$YSLZh}SGx=NJyvO)HIP}`i98ABu65{-iy{+Vz4JiQ*+v-6y9k!~NLf7`elTtE zo*Sv8ZN!!K?p&KLFuwJC*{pbd1H^ot!|lL|zQJ+Qi}C~v)kI;)$=h}6W4v!~zv6>@ zo=NAUiw2u37oN=qAVgZ7pSPbjqqV1j=wM_EK12zBn46 z0tgoCwRq0~@8erCftl!$=E2`f(CMktNc;EZ!wKjQd-P#Z@6;0jl zUOaO8Xj)U-?PZ0rxL*Z#1C-DNScmRUkwg_{@X`1<{pi_H+22?`7rd91saqf~s#k$= zEbJF^`dl?&RDj1ij^Ppe2D=^h<3-@%|6vn&R=zps=7|pRF~?0MA~qyjRKlW%0&3Z# z<`XSt?!Dd9m^UZ)dc?LsT@f|iEfBxsK0*r`Zp|;iM%RMtP|(@WYR?900BZ|yi#i8& zu#||zt+}$=OCFdUTfkF}VBT@e(M)IL5Hkq1=|J2sA&uuN{qb zL&=-)Dms=fXKWH-hS>dczga!bAS;bgpMs zL4V-ab{&FZ^0AY}CD>ONQGk@*rzC`1Sn|E>I zPbw!`O3W6WEaXp@F6DNTqi`oSya_tKd2xq%&y!Z`^CnU;5fLp!tRXllBu=Q+`XW%8 zl%?v%+G!c#rxg_=sDCkD( z4!NWUo2)P%f7d4aHP;z*Cf;1@>TxjEQ_K*Y_^Y>xSgYCZO>pR2>VvADO7|xm_qw{* z|90t1$k$IEz2oG?maIl*;F`ne^jq6bCc&QWMBHvnas>MCX-sb8Ts`N&%b-)t5WAC` zd84zgIpJDb7gg{o60bCO0(NTCw9oHXKSuclQ!u!Mfu3!#6dJ@ zv%}8@o~yqmFv!?x&m$4ej=k$@3n$REN*Du~3y*Qi%yx%eAWwxw9R1o;`@lUon3L#2 z=8Dx1S$&{ft+jV@uhOR|*jwRIErHgu{!R(*`bq6r&UmlX=Q63@ZcVg|WLzP?qK=1A zmh_Tnlh5C*6r^qV_{Qv)Q4zPH82ctJ=4GkG-S}yjw*rj=F}M>AWSy5kx$H1^O8Ji9 z03$DbHqw|o8spK>Wf}68pBT=u&^zqAFU>z@{%U;~HCb zZ^gnS&5h_c#7%uzBOr;%Yl?$!8v1%YxFytgGAbofMcy#kQ2M|4;;h~*Ro+;VJQzF{ zLgqfh*G3fXF27mZ^cji9M?A@J4l7`v8!L#6gHiZFQ)1tVI}TGJh+gPXzC?_Wr!XhF>C z-Zp2}=V6i`F?D|iF-JtawExY!u)dW)gIZz6Q&7G4!`pqFjKIj;nq)-ONlZ}hcxTA9 zbn{j2jv7yQ{NR5a^)oHRz*Qb|9OK;mu-jB;u~)r&e=wvZ@F4B_E_2@GR(d`ua@e3-e=k2gi>7Qe8v^sA|$Z%21irW|_Pzj8)j zqZfHQioVp`iiZ49%$)Bxm#{gen`UTW5(A&O>S&cHM*^jP>rP5rY6%;OU$_4aQi-6k z{f|KEl}FS!xNB`roTJM69q$Mc|K@DFF7w}vX7bT7m=94iJ&!t7ocm^*#*&)GJ8n$O zH2^Wx&3P5`t-oo+YD$~^v#sc*!P*i|j9w!l0(&>SnH7oC*!$P__g{hbZ>5m``~FNs z1XuvHZU8x%*BDeC|6TstO0SWKW)amv`jJgxARUZ98B8v)t-2DrR4?bb+HHx-CX^+_ zD%G>z{2P23IlzbGlI+xo7a$q)|q!QChb>G`Wd;|BjZH zm7hXd^sT)N)$h0`k@--R8zZu*NwMJY2XMFnXX`lX;;Rb5b|D{gu(A zVW_{dS~o~wYT;&#Y)~{T!q56>)JD8=0|T;+Ouh=^T#2s9cYk|@*8+AM8N*K^51Q%r zsF^U(%X9F(XUWDFM2K4P@7L4~V|L0j#*v_fsAHb`d$;@iWMa5g$C*yJ>`4PYGPL?H zGSFtFnx+4mT`PN$g0O!;mI&3_c@gA%)4o1}&1WKfA%}{8t5{?i6^AEg%Gk>8x`wO< z5f)=ub<*gdpF;9Y8bWP@T7~k4Hv9kNni4T|5oo2LW@99oe5>2%NHT;u;s&<7- zp0Zt<(kHB>CIf)?=QYHsYJKIB9&efKi*D)hw>I5vTBkz6QrSN`)#!A_#%lWhC&p2- zX)Jlm8hGGXN`ShunjTb*pcHE8YSPw|aAW5o^7{4;Tt@~(k7Aw^32*f4@MdP*vq-K9 zd2U!__a(XbWS6ud@YBzQZTkWai|;8`v-K`hlR8o9 z$PUUXL;={@&qBe&FhL!9x@shhf}1IZas8YOOjp4+7L-Pk_^HnV6aQvRlRejVG7uZH zT<8J7L?RTlH4-0IeQS&W`qTK;6D(1o>r)iiA@YG2iO|F*uxnaMw(Hf4g>|05xf}Wc zE0Cg+Va&~06~my+8K;^hy+|Af2-b@+uT5=CNdfFA}iMT33VM7t~$|rAM0WjH6F4=LG9@XwPIgms@{J<^|Se`cuMeP}XeF z8;216>W_#GZxDg?Qb;l^@!<#R{Csa(LeRXsQcu!7xa5z0_Ijs3suo~!A$mQSoh|aW 
z@R-yV5Ek?vg;n|Pzfw?JTqj|v+~T2Q{WYYvRQ~i($aMFLL;}<|S%5B&m0{n;>f-hwecQi9hCn;y>{+fi&380r8411r7N`nA{$05n=C+Um!7yM zv>rgEbDQOuN<2#KQhJw}ph=8BqBj@W6|?*iaS8h|>&f1;sikb)tD$^mt$8Tl1^q^3 z*w1h_)|;(kOzI2T%I13*J%**gDwx{?pJw}X_1N1d>JJ(H#4~xix6S0=KC4RKX~;qV zsw~^?YgOCMF73xFV0gFiG-p5MpLDO4nRw=9^^SC;e1=)EiPSH zU5T1!buD0#r`-6VVzQG#Eyo`EW%WL6g$d0as5=eaL;360k)0?D1pOywAnt$|My?Fq zX5L)P7T8Y895T4j!A<<}|4q!wt53W^$? zLgy+N8PKuJgtFPfKs=I^a(N-qsA;S;#-d6;lKsZVXd0?NQy(-5@=_4MBX<{g3777V zNrIS&D^_?Hwn`~xgzkwUGBVh?XnnyYxwaj(y}TedhH_*jV%m_U+%HX2e`j_o%S@~Vg40ua5G6C$yH($dg zHIZu`7_*V$qsSi%n=iyxbOn$cIz(UGUwG>`^vC(U|Jx~18qxK^k zo5fn~tq?vp>@c_%tpWeoF|{gTfBUWSPy>F4y&lXthlG<%JAH2}|s-3@!rKb~%pD zTZa`h*VI4VnIRGZUrp4MwA6MI$R{?b|3pHKR4}ml*4W_+ZxQcAjitAP_Nz8=p0j}~bqX_kTqt63-Pq+H^4m`ZJNd|+)dMh?%0%i+r_>`CizpUFJ?D@a2!F zrQ)1AmCQ!^oSEL&l+AnmTH3C+)fKAh2aw=FXB2xQ6SKI1qhM9*j=m<0J-fWn-d1Xl z;HPJE?{fQ_mO-jXid?SmOArcrWkHGRw0FgS;46TX0+WR)^sLkHgd=J#nXPN7+h?6A zQ-kDL&cXL?rNvxAnAE9A=~il{-JDd*guGD>4eF&XRj+v8Qxm|s3fVbR?FKnlKod|K zmq&R>l$fQ{b5}kerpF0HOC^Q)h$~qRjeYtVdJbc-uPviyT-<>R% z){%kjo68 zzC=a0L=gRG6={%o6!H2>HwV2$9XAg-A zAKQ4hsh^fAj+nM#gu8aPvXw$qg}bEsVqOd~+S<qaELh4} zc?L{%5a0<*l3=m7o6CjdBkO`wLC}Wi5f0l1+Bz|Ks4;QP_;nQGk}F6mXe$l+HLpd9 zsg2U4`347_rBRWk^5EpxF)pZOrZC4+Gby&Jm;w$vk^?4~1+9riLmJ=MW8P%ehv^&& zES`>%bw@VB8=pi^I#RCl^Ty&74Bq8|ZMj3x;NRiwy^*c!8q~|1Ym?c`;uBtlYkg*W z;7f~H0p+8s)Chv%0cOwa@veu3;d&1>p2sZNi2L@PdEs%mCDJ2$E&eSgdrdF|Z>^X{ z{=J&~7*{L!y|_y_w=Rb*$CG9~B3ri5Z}_Le49mK3y|QlFx(^8#Z`tXgdetpuV+vV4 zRN?4vFi#ZKnDq?!?iX39O74VtU+ykBIop(0ig({|cwetQaH@>D;5nK)e45~tD~pdLLs{1l+<5T zo>|0ttYI<;TL<3e<+WynYpFJ7sGd`L*y#sdPP|~D)bpTKXGxV-200KL@q6)lZ`0$h z_b>QptDIyUU3;W{%vI-*VOscw-eV)XUuf@K6*sCp^BN+u?}u1P@HCF3`EQAVM#ECl z9epQbCqXu$ihY5P2S+G7?rh1^(z_9>;_2LaA!K#Jtm{V}gO z>pNdR0f8!WCV<(4NUe)o^cXb-V!Y!6+w_x2%cJe{;BCde$tp0B@+6(Pfw0vrEoSQ;tLBBeqbMpEvt!}=Z{)0VwQD`eTqHxNpo=Y{byW&~wY}$sU>xx;W#dhT5ilt-olIU&M*^ zE^SibxC7d8Rl4Y~sk3Vs@3%J;eR+28yle>9p-B4GPD*5uX~>}?WW%b`CvFm%O_^%Z zjeQwl75Asvg>orf+QRzcmY&elo%`1#!aWbSYaGE=NNxZ-=AmhqLd<(kBxixnj}3oZ z*upKe2?_xU8oG;m=%X1eavM@&u(ZSkMeQg#3n>jWndt8%#mFFLl(jKLkvtr8G%*}} zsI#VV(=BVp1q24}ZHoYbf%-SlgaS56I>xiF&A8iQWM#_pC5hDp2-%>nF(_%^&~?38ak1psCdU- zM9AE%K;4_@IPrNtRh0E-Oo@OWj{9k+c{8U{b9tbrs@fx%H>NlLaC$Lw0fs;qk9g$L z3Y5zRupY5WM|ckzb4pu{@Hct82EFH8fS4yP5AW9j_E+ldzHm#z# zN+&rEHH|OQM(2#>2~}W{YG(Y|k161YDc3>BuV!qenqQvbT+-HUJtce5iBmp2b8ek0 z2zmW;(yfocmCp8NE<8Io!#ZC-?Q?Y~N!$6ovB>ZYZRFU#Px zf5tgHNTvQ-wO!L<`I=!{nQGhu22MKK^B^aLuY=EvNOd#XusCER6 z=3gLSJmnD=cB)bd2_%wy<_(KEjvQ5RnLT!doQnNf|9%U{qmBMXCsh}uJ**Ry)L_{l zLh^!G+zbZt#$iI`qEpI?EzdCq7|J&(=sds@&w9d92dQapN@G&#Ad*L2OU=If7JxH? 
zelpx$BgjsooRLg74I}wNFE}M!ypb}DG3Xqfd$&N?$69Yy{Tva0x|7OXK;gnd|A{v(yz~B5vwOZaIfiDx$S(5RCXOd)BoY46IC6uuodgB zM3=IMJ{KsVKc&trCBBCzVhd`!*ZbSrc($6!CK{z#Pw1#W0gBzNsgW~uf@4ka(Bx;1 z6pVJWt<(Vl7oFrjbeAP?uSk3es^hB!V<8fUjhl{zHV}PZ6}<7sjFk- zB*~1R16#leV|Nr9Sk3xS+@-X9Lh1s2vc?P*+}WWnx$cC*kEz}U4C$zT<8(>Vp5l%9 zk;gk9;lQ@F&6M@-`LtVluV6$OkrZx~Sf;WuU5=L@d3QqE-@HJ3w6SQwi7U*JBQ-j8 zB$KC>qpocV8gMFaeP1{{ri-@1yVGHgm98sn$tt6m_RQ1Ee>;S@|Y$Sz9A#onfhT2eJ0|0P3Ec5%BORcsLzX=O7 z{egy05|=A6f7B*}?P+d%^-LXuOQH7u)YZ3yV!FHp*Y_P3U*lcjQsRv|qC+*qr@Yno zjlE(^TQFSkY5i;XCBL;~Y;~0gV^Bu;7}T5rW}5p-q3L{+PRo$%oMI{LT}kNWT0x#5 zT$E1zuSsuNNyO@dkOgqJrr6{MNI8Kbb;TmlLocVIH#2Xq`_)Fwv_7DFEMQsXf>II1 zsPmw~aZKCkBY(<~|7>&et~`JVs#gAg2|t07R#7;*te#BF>tcJ(jRxq0GokjvjF zTzEK$^Q0hZYZ78lGJw0GqDW_}^?&(g(4h=0kxNhGs$c*7W|q%NWmk`?Tr{5am>fy> z=cNNKA-2Xw=EdmN8bB{Jt#KG~J7ksIyQ11=rJ_ z6ulA zQjquE z!P#Q+Q~m$)XINT7aM`u6d8EB4MH^3H1F-eYipdZN0L+wLJ_5vt?1_MF=GF&tVdhZt ztb>2$_9@-#VlUely<`vcviq@omJQP8Pds>kTB_&q%|J%A)9}s~TXFLwkDc8UQ|R+< z0TZ#RvUjVPjf% z?8jDPqGi!Ykwj%HXyR`8?5vYFGZVPWbEP5fKfM%#&fuAvvWQm)ltFHpkCcTDlh|=1 zKy#pRy!}};=&)6!Scm6gdNYD!1S1Wkr7gP4@yI9N(QD_iV5b6v)jpqpvRL1?ufOV= zR+z2-^Rqv<_5by{@bf)2ud9du-Q=~xBp*(5^#9L4GtRfK1vOR3Qsh)25r3>Z!tZas z{M-ul9xnqV7~!vGH47=feBY#XHDg8d`f&@y?`+f|-Gi_q#UG|(+(3Z=A)_*@YKKh~ zy{n7l*_`ETQE%1YEBxH|Ed&i8A=C}<-$VVa!(g+xvo~xgm$CG&L_)0m%u3oA#y^-*2EWJN$%YLCchQE zl;(aYqo&sKvJ&pdEN9&ZmdNIT9AIlI^M9o?Yi7c2Ef7Zf&B5YoC0B|lw{J(q$uS~Q`2@lgd`VGW_G5b8_N&4EbKEX zWWp?IQ+yg6ZNu-1*_u_#+nu&r<;%Cesl^L0r{+^-d4J&OZE-nOo`k$Zd8Ph9mU9!G z=uBeQfGqwkGX;yIRfPT)8|NG!TWx%v63vhGXv7g+>@g5xsr#c8=f>rlFVoY!=p1(5 z`6L7P$(jZ3K#RGf2u}|%3Atlutz_Mo!dzrgx?r`7E!^5kPDh=G2!pOSHMS20~M&|N~}Jl{<((gd5uOv1N(3^l~}^k9X6)nvMF-;*{}hM z$PbC})wiH%`1haCxuSm&%koQR&@0}Tuy~ol*^DyT8Bh1RbL+TM#a&xn#?BiwPHh9G z0kGq{^c{G5aeJtRF5h%v`Nx5xj1lNd!RIvtm_$FXo$oliL41!n7#N$@cUxN4pTO0; z6)k)>Sop4W1&jjwK6^M3!iL*w%h8q?(e=S_J|$&B>dLg{eVkdUHUWo zbN8IQptQg1fab~b$Tn|>L)s6ozHlUez@LlqK!1_n+eM~TQ9Ad=VC%tk*1 z!2-Y&WeD(cx%@JY#X`!%6*G9@M|{ICRpSEyWB-=K3JfXdYl7ph7$b#YIBjOi*9`7=g>A*D%WUXpiw zNY6n%1#e5jgp5HU;mwi+iuMV87XS#B& ziw>VX_~~@XnuhoU5fl6{-fSSm}CHESiiA^SSC zVJt%{*|#tVk$oAJWy&^T>>(qD8AFW07{hz1@7;Yr&vQS&zuv#z_xJfD(=4CmTF&dd zj^jMf<2Pz<>Ehe8i0M3@xyL=)k+HyhGA+jioY^8qa5VBwNs3IqSK(j_K~4W+re;%A z+F)#OQPNZP9)f$s_e!A4v~lpztibhST5=L>Urq(Ws4s9yVSRPdpM^O<-c*zql*Pw-4 zunX1lJ?pl;dV<%S7q2q_1_pOJ2n;AiQbA60!F7V}IX@{*Rbu&+X zY+fZuPa)^au-P}gwz$}g{dptO+=@WMI&I_Un!^#9JLfX>5&2>6VnPemlGAUr%Lk0* z>-R&m9u`_k!9(t3=vktsWu&DPP{=hm5(NZ6wC!wB1G}pq?j9xdfDzp;IFD>kNz2mF zwQHal=sMT1&dRF`J{yD$`S3b#9p$ktT>OIM#oCCaix^igny|5tP7GS0ags`rg}fh$ z%w_r&NmJHa{85YuSnkYvHt`8hC|{alk~VbQ_$_4XbSMO!^>qvxzX)0_cS9q(R^ z5XIx$WHa8YU5)zQgAn=)Ft=onJpOX@y-v$wiIde_T)*T)OjAW5g*_w*kjhA5N->U2*4Ns~^(f(2;p99*34`fP(+EJRhl$y4(e zJiY+A{nqPvJhk~7kcU2Y4a!{KN#nTbdv$DIzD@ihD@bUG{WNX7jflJqv{~K&d53c^ zw7P?KIepUJvI23@n)rP&kFPWKyby8XxY3WrElW4}%cwd6+A=K590lDyrKc=ha(J7F zj9!(?-1M5j1a-Koa@D+$XMx4tVTgJ$Ip^@Cdy` zcDsL-6~FOm*}rhjGr--2{Kzf4j2 z(}eiRM51hP@c$7U|21|&5{SW4P`|w#76%G<#+GjDJh26dyH)23z-o|bHXz~sGeo}} zg)XTNY67wsA7&26Pchd*8UWZL7yna0v*YgiTSv|8oB>6-OfMkz3P0MIk7Q|oB`OvZ z_r7DhCvz={=$3Qzv^=l|TZOJVw+daI5wUPPpPBmOwH_ZVikz~E4CsDPldk3u@zRxqNB`koW>mZ=1~UTVkP#+T*lv>7GvC_4X-|Q&?>X`OcI63*G+L zbt2Is*q5EUGt!?~OROpid;C=5bwF8AO^yXBKT#?LH|`mVjC`yCwe&d8UY^7I(5&`M zFbKtUSNt9T^f-2RUibxugWPc5U)8QsNpVYB_y4PS4p{5kDa|$i^Ou-0k+Y0grSa=|vo{-d+#>3Tr z^S9{DFmuq3-dzf69M?$1rkChcq;(*_+NN8E=$N|{Y;06C2YkFe_-eSM_4D*M&rhsC zyGhNm9trBCX>sF)*P50STV71bd+uS+PhA;0xB6zEGMAi)PV>6%6g-bRchBtEOc3JE zL>4Q@qDK^+H6b~kCJI>pgBB;2i=zV1)HE-L5$~SjIfVR$i)%WL5&W*lRYE91*ld66 
zHMd%Re&ls}3v*KBG{+3l^=vKGM9Gbg`{sbxiM_?7l?ehLpwq49FIYTnMgJ8)HZh=7J)&67d=*kocL4Oi3$8cph)ZY{C^Y^4p$rle<7f%%k z6}kp`VweHuC9|!MN~YnVGuey)Yg(6Sg}g*%h>F5F;GBgYg&C|{1j&*1Wh42v%c-6T zm7F+`>=u{ya41HdVwqUU1M4BCEo)^jOYZz8(Nod$5!jV&4-7J^-++W;s_09(Lo>ih zdjVnd{`>~1^zaoWIuypv`C3BPFRI~H*wlqWi26k~H~#*#GN$fJ!o7FDWByy_YN3vU z8){(%L@A-knz`2%gU0+_a8O!<$koUh5oo=BNrui_@2 zujuM<<8iszwd>P>aYa2e`Vwlm{`l*Qb+NFVl=*^?(K+nPv=M-qTP|N8n-)Dfz;o8Q z8|QXKrStK`RpM!gTXWj`x%HOP+(p0T*FM2Lifs@rIbpB$W8v92ZIm}~Bjk?Ax`vq0 zdiTA72k9_N0gSWH3?~IDs?J*!ac3-5kg`hDr7~l%{qkcz=7Es2Jw42pni07PV?hfD z4dYT><;G5Xx@E~h)rbcYU!Vd>QSfmHqG3rHa-;Ce{?>A1lH65Og>)Z=H55# zpbq1OQoZp#pkkx{_sL1b|41Yl;pmzO+v#+k%duwL(P4?*dHd8aNX1?|!;(Fj^uf$D zggsWSYMI|A`ab~UdsYY1@CS!K-s#wm!%?p7Z506_8jBtncHvsF1d#T&QDG>_oeYwG z*PF$PmO8Pa9xUwMJQm!M=A3fdLnU zK};=;3}LqPtzuI9qA#gBUfR9w`y=*`B9d5oPT=tT1o^SGpzq57E5W9l-+807S=nPs z>5lkO)6XuCfcBLz6xh`geVUTvE0y!vj3Ed2&B`crpzZxb*WYeI_Hw@=d-n~}%w~;R z7{KgPddgVfl$A+Z zi$$232uz9v_^RaoV)+X)ny*}K{(AV%u+?aMLja=|6ku;zafa*!C3oIFeS|e+2ZnnK zvp3y)r6MYbUAqTO+J{*1Pr37}h}lbIfcwDQ`W|=QAFDd}eSI_b@@hL}HS=4lJG9=! zDczX!IO+5ir3u$e8|{l(ve0Z#egQmT%)BL+pFkty@Ydac)$jLl`i_~`S_-Zro(!sS zfvd#^U4DWAT4K75tLJduX%7S36$j}f&|P?id64kRusmZdCjtR|E;&AI6)azU@b!Vk zOG7pwddtDBc6ZY8nrJkWxo^w_fu9V*N&J18ZOc~y?h|ZsCPq^0rJYQg(GWt-X$8?` z*Zd4Q^h_B3Y!uyn>{mPexwGFtN&R^dro1PfhY|<aOSSTn&~{(U_7RmMmc3fFwe)oA;qTh3xx%g5*P`w;k; z-B9bkuP>sZl)@LyJtgDW!lY{T|9*SR7h&>yJ_mnx(R>bmB%kk9l42XAOa77Ua+37i zcF8ge9q{hI<7u^P3Fr^JAg%kad-zqfs0bN3c;f#f@3}t%Kt`H>*l$V${LBAK3gb7p z_~*^PO0I1oj{o;f&;tL51_AP1YV;p3dWXk0midni?D@k+U48yrW=38UAB&i^>0Jq) zUr8OrL7eBX3Jn$ElBj&sFP%?|&Wkhom_9<=d~Y zb+9i9N@<;6@0^z*ug~H(T;zAZj`<(o3XDKsV4=}*A;I1t*UPo_s$W^nWOMObwXY*A zYCl`&3#}6L(L~H*b0z+|=-re>+fW9%S0&&&jWFL)Y3+74&hplIE`!Qs};kU+Ohms^%&^tJw7R>dZPPI9aMO$+(LPDTO^fEL64OAoXpgFh#bm?Ww&K|*TaQ{tt|Ap zRxEtY7dS6clvk`w>KGN+cIood^AbhXN|wuKD=jT8Jm7FzAu%?xR6KwTU0zY<4&($6FkXpn`el~vOU z+^@!R_UiQ$_*}CH!gn^aAZu9xt%QT&+GFC+mt(~QBGJ7zA@yp*H`=uEMpvNllt3ur zM^|PQq04iwZbC~+f!fKnqH;gO?_>g3Zv3#)4aNsL!Oxx-`b}0mAWhJ25wr+5~y)m%HQ307f+x#(U zP8>7W&i}1gje9Wd-P_F}OUBB3_h*#JwkgKQ!29~ESpTvrPcL4b^}bL-WFzr~BTndy zQjcgJ9MrA%8%?tB_1GIph%xA_iy=m_2~^z(>Pn~_2;QjkqhQUOH$8~uYOCGVgpgz< za_4Z7wMw#p=RCdpLT+0PHh>hgP?YM)F@>^<@@y7Z;cgDc^g@bj^m;dB<_*jPt!;+r zorsyGK|*nwqDjzF0r7FbTgTDT2-*tROJObZoM}ZO8-eRx0`;i^J=9^W#3XsW@jC6r z?rE>|co5?b?;J5xooud#7Dw?`G96Nxn4c zXmt%Rk`j0b(?X*bF3gS1TXn&k<8#uJW#@}6>}Hg8t|xB{h>=p_ z3a@%NY?63$VZu%lT3QaL#s#arQFegi{kQk@>c2f@5#^c|wQIJq8oi??T-; z?#fA{<1}H%WI^?O%>p|r*63^U5h?O#Ubos|hr14LP+A}PSwsC{V(z2c9H%*lN}sN@ zUTH>MahJL^=I#WMU3i;>;)F@rU#nvj88-JmA(cPb3RU2hnK>& zLagl%!?I(#_RQ?>>WJ;3m0!=9G=8=eV?aT7U)H_f>pXW4+UsC|zy;GMU4Pum<*^w(yt`$iHXLW(dOy#^W!F^k`vHi)rC zRE^x7vLIl)**Ylr;WM=SsGp5HNDkC9-flz|Q1N>=+U=FEFo?>cAa7n-ecQc8g^{sE zcbZd?W*j1g*QN9Epi<;7OBMp9xy=+f&MT9cT?7sM+~=MR8oT>@#M@QBgQ<2)(|%A{ z^6m+wp{|?TZ~^Z6sZ3#EVSG_v;Q?mE$;Gx(9E8}8D#G_&|PSt(_G_Yl|;x@F*_nMaJ?+**ik z`msY{=X3DT*;mEU>%p$c+r&}W%{Ymm) z&U6)8!q$#nhGWtnCUT z_q1^f)gsMiB~YzeGe$W;6$DFdu36NMV9XgUwo67{N;jg>xKyd4)%EHbcJEtK4eN^m z*L!YlT!cB74pW^1jgAS#qDL$EL6wRpqd!@$fR$7pM7_dt;5>!uj?$$vN%2~Syy{NO zoiPt0tJ=yl%J8E@m76{w^)K)`V2E!Di-VT8(y3HiNFaIYPf(Q-Nd<^+?ft~;IC2v) zD|il9FwHwb{0>J%ppo%8w=6uh91k_OmMB!})Op?|d;)C@kh8wZPUn@O%GnRgO5>gc zXZZ}-v_9YULH8D#w_wudKozmvCr>V&M3!YUv1aTv5@Yb*jdc_ z?7QrtkAnvoJnu?BMcLN_2YaGsO+(KE1VLpH$Bdm|RS*$&suC!!&MtPH91c7?iW#QI zLCOT-A*NyJt&|{k-{LkX{#?3o+A}Au+^NLXNMLrVI7r5r{@`;7T$y>UBT2jQ=k?q4 z>a&YkbL4_k&owyRBT@9T+uY7(_GMY-YC|=WV^6%bO-riQe!J9XeJ{9JPsc(7N*PQm z9y8hqd^fm!>GSNgRPE>G9Ld_R4p#FDWXCUx2QPb*N?a>0NH)U)F1$H}`gtHD$3pUn 
z9)HlM32}l0uSTlq=gzQ?x;(Kg!C2DTGJHL;iLUIgS_tCr%h^mcU02}aRM=^d7Y(EE^dpZJvIy9y}h-*>kZF|Chavu^ine(h&TAHs?%LPt(Gmt9msA z`;_YV_B~O$L2|>JrN1OuP&hDHg0K>uGNnWbnrC2145JtX?izFN4rb+2IkB*m|0LD( zCU4BGv3Z{Wffs>~r(&%2$$n4-)1-H8nF|Brl2?STr$|FPEQJ8{Y_JBvKr$kou$2 zMrHib7XGhENnEp0<*88`c#d2gR~mh@Gqji-i9WF}7Fww%6Vt|Dn{l3GakgIXJu7kD zG1$L;{)Qo5uFZL{TJ3B!dMy=|Hm-bc-721?cvO11uG#U0aDJf9!nc=gD*|pF9X^Br zvs^91&@QBKzUu_P+lcedf`Mt|P}7&LZ3$uR=bx7yO8kQEwo`@&p~$9j{1HRaGw&@T z^0T&@Q8crK9nJPKL`Fqv=?bnkr8p-$%cKCOB@kJHtm+bf85W{5Y8-TmE&DM$t%NYK zAe4C`ndHx}lk^Ndq$yww9Da7+_F{;6Xe)z_8^RcmjVg3mazy4$%5?Ndn=PZ}>t4Dx z4sl;+Q6f3(Jt)isfTOL5NHII{|(hWsy6+@xzDE(k)p$L9NBUPtuJw(nO#5Ji5UjBQt+R zbhT+8IlMz$$8qDG9V7c{@-+)-LaAsi!#s03N!I(o_aLivjQQuab#I*cnrc74 ztnYe&8CE18SxH2RrDLd=%+>W0r<%^kta)ZumFpC*^uw@WPf#VOvZ6x$Qm@H_ln1~* zD6*H!M2iXVtCBSE3iZs7&`TQMMk>-CnxWKnT|3B{_*HuD3>yDK?gLG_7uo0HbruX- zf?nP*HTb*!6*1NlnWQlt6PJ`Ve{}T=VbU(;;h6(jpe+aaKAAIx5=Do$#m79?QJ)+D zb%h{y)`yP`fYuyMJ99?{2r#)MoD7;;8#XxS12O4ybtbqwnRlq{AXTxbei5tXQM6fi z%pwYmS6~wR!m1A!&92E+utISkpFO%7^!!SOmxI-ug|L%K+AY)TG0L&76mN8=Z@{+> zMZBC+e`9K!5B*)CU7)*F^~hq)jQ^2cg>>g7U>_hWLxb;x3^2a2bvGp#-7}cjj-gQR=-a7nOP?+Z+tRKDln6_)}Nr#}1>Dy$(G=2?4wE z;}(R;Co6&2EahPvB5G(KX|&ZO5t;bbdQQv!7T6B;=1-SU9*8r9b&rIX`gU&W;ZID5Bmi#J7*Z_$_U#K;s)4IVYDdO=R9B7Q|2(|`L>}r>h8^(K;p%VKl z)htIuOrd})Wdw8$Mp7=f^1M7Xz1W&C|uQ@ z0Cmq_VT+Tx(#%bw_oq$EsCBsnk$Iq`c4ESFYoKd8?&AaTq zNtMzmU9ovrE}TzoR3i0aznwF;?%AxZi|dfxPsAsusf|1}v2Hw3>5iFX3--xb9cgGd ztWJuzp4)RJ)L>-Mq-~OVlYuj*){j&+&=mO&EZ$L;i)$6AO0^;SM)O=7qxotqMC29> z)32%aQ4ka4mZw8Am!hGe)nFUN{4¿xEfM%YQ9kPPS8vNXWfd;qq_1wpuIbVH(d z;v??T)}}V*f}c+Ba7#9E2}w$6JTXxi+@++Nd2M&MLcZC@rNr>FTiZc5l}xK^l3cK6 zwox?LXfwly<}DF>@-_1@8wFm_{Qie;p@y#opj+eUKSz^oG8auSXvU(-avt1-n|XW% zu;6GAiVybqLYaB%I{v{7$WGc#p9Q4S?%jq6$$vWw6k1lqSG@C91p2%(Sfc)MI9SSB z?JYe5yW;~;kAECpHBNPh?w!i~G<$r@Q}}aadT&8F*M1EI@YsJJ;Bb#l|6cHa8$U&mXH|F=%ww}4#BIvrUH z@+7J()$YGdHFckTI9KN3wD5uA%dNkT4t84Y;_x*YUZ`V45cb2R5;iVXmq-3I`%<^P z4X!A~#=qmgK6hUP7y)j)s2BB@Jx<0Q3T?Mxd$O88Ikzl#`0H{Ha_C&ayJ442t!572 zF)JAf6Oel7Dgy&AOnhT?MHhlG*WiJFG7M?p^Z;|hBIf4j=Y3cH4ChV>W`!pG;x` z?k@Xxzkl!6%xXQVf$><&ZJtk4uv(vs8oLbj>c#D2zWs_j$ejJ%udgG)sPKjss_iZ3 zolHQ9dAkw$(T#Sdasp=(cXjcYa0^5vHd$z9T8SV2C7&V*Cx{s~KQqCt=clr4`>FvE zS$Z!VqxqXw{B^PpO3(|11Qe9@M6YB|v{)fzxa?4}T*c6FKC7$=6JqXJ+Q5DIw z68`IyN3XomOxV6HIwOs9f%u;mGX&ES5ygGo2Coy5WyE<=7j1Z-wTDc677!rQF2ox# zgnAoU=AEvaqyOx6P2OK;0%YL3eg7E@rhC9#Wa6N^tC0We$G=Utgt@tSa|FlfNN(#O z_Y>tYeOr$+0R6|w9&`|dti&T5Pm@n`v;Udm5|J79oymD=x>|r+06%%4`vg#*QJ1!Q zu&T^&DqLe>wt&Vl@M@ZGY_UAkZ}YK+d-Fq5L8`Noi`wa6GfR^^Ecf3QLxRt!=G^ct z?rpabLB&<`Yl!A&YYD5Ph<@(*LHk^M&^(5)w(_P;aBwA+u*SfcKnfH~i~Vr4l~?9_ z9*k^tJJ+2ecG@QMi+&`;eAIlVv-`qT>Is)Blw<{246 zPqxDL?XYa4Vv)tc4Pt5wUa<9r_~D;NdVl%&eG^Un?`eWX_IMQwwFbh0Ne`N5n^ybM zyX27=`wB|r`)1tc3a)KafLNTGACrgU-wcG+Z4?BIN9YXAe_K~*4N06NPK!)>qy?3i zCC>@@-dkHB4|qing^x*A7El+g(+V)8>Kt-_{$)lsv#pEn=Sx@Z3XeH^EGFg_@$QG% zs?B>TK6Ui+<-3_VgSKTc;W1|vjfnvo1n;OPn;}2?i<39!JgPso)v$V{c&kokj^X}Z z|D4)D3PS(VfkONQ1Gh?5V>ElzuepCu%ePiY{o=9hUEqx+8F!fov>bAZ-3T{P_YSpk z@O6vHwt;g#x=;iG7uhCR`<8}FMcYF$CdaA*9MoKn77I$cN`~wmVOlW1{romUpU+E* zYV71!QY+>PcatM4Nj-HL8{WR~ZG<8n;mbY8cZcqk3gl32%Vs;mX;y7wW6U|0s zH|@P-Vy-RM9anZ*$Og&eM54IfMnQKagyC`{R#ul<*4}!zMMRbjE*ldYUoL^I&+AK;~Vw|;K+*oQT!abR6bY797lE#x> zAZn|rZEB1ZwWV55IJVlqZo;#d@njxixclVD4#N^dQb~?uYMeh2LA$n7jOWz$!b7(6 z8;>~pS`RF}+hL$MhPpZ#R45+|`7pzNL6BTNc_1K+&Q^3>r59`IS}G)9T^$|QW~5T~ zO!`UHsOJUds`qToR856CHV?@cyF#S}pDl~&;)ZE(ToVBDxAzg{ObQh5r|TWm&4}G5 zBheY#R*Ub?SG1~~r`1(C3Br;l3gdg|l=t&NuF`)zD04f=k@7?7Rwci{KU=0>J~gW!bn0hyQ%LIH=QfZ?OzG!K z4;fq)E%rAsW*i%vW&NnoX+GOwzPme1E4B|;<1i|!W!T(RiJ0JTZtw%z)-Q7PdJW8F 
zes!NZz#G0F5#8jqV6Im*M23avNB?425)ThBRZG6j7e_EI{;ZLF5m#*G?2te_G%PkW zQdu%KF5#@y$*M7bO0PP!Ag-$vA?A1H=&7v3Z3pZcU)+5zuU_K&BePEk=^_FA)r^VW zplZrfGwf%cK27&Wgd1;k5`5NKLWfQ(3mXs#sl*;f{dLds1Wr z!rV~eGG)9rfHFJWf-ge5GcM)(n`ySi^!S=vaa;)$$*)Y6V&}Zgo$;;@UvVl}|0G#C zx+C^Ue^YS9o)oL+x+Aj_=A$wcBcunq#}FlFB*rDbUb$>RDnYhI zYZ^w4tgHp976De;BVipPhAf7AZ|;0#`Q$Ip93Q8PM2|`!!Zn}AXc%f{?3lHSIm)>E zQzlCb;1Rc^c(mR)1q2RTA*{gQy6yW;8_FDgSkNKiS+FfZ?_w6@#W2~*^7ZBPQ~F0k z?D`e&gSIE_?7-!BuAjP=yIjEQ&$oPvEhhjK)NtEAZ|r-Jo|{fzgYPH)xPZsf8-gCA zXc;$`01?MEVJFjUe~z-m7wHrqD}BaIsd)^YRCks9Rhe*lHrDE5Aeaw#wK7R1jJ1jB zxO%OaV`uczT*INjmavYHDr2jmhKSL;M{neJ$Sy9#TOR_L9KeeP<&zl|r*wE=tsp@A z{K;|G2w=W8-P;M93(*|(iUr2m+>j71RMgJ46{hRK9>#KYttzE}HRH=X;Um0oQ9+(f zBRK<&W{+gK|Cqg8#ruL!*uESGV@bxB#R8N@*ZCbK7IPEX?q)qjM{@}r??>)baKpUY z2&=bVC!U^w!{c+TtQqs+xTDGe7rNymOWIZ!&L%pA96ey8jdvH9#E|D(Csg~aY?`5# zD^)r|Dfi8UJipm@^i!vX`n-gl*3I;sg!2>nY=>xl7n;mhogjBr`fT#B?)bv5r9M#C zlY-$o82yQj83w*Rs=vGFG{rxHSm?^Q$}d)FSL$7!ovNj;TmbuYhaFjzBT^tGV^d<@?{wv97@qb%W`jg7A0xkfnxe`=Q7 zha57%g*JX<-Q2-3`owkgU}s)T!_d7I-9b|MaDT|$+_Qe(?^wu&J2J-}m&yuyQ=-vA zm=qE;ll`+g7a^7|o(up*Gh^~W;G#az_=_m#+drOz{hQJ^gbaN21)zZ4g+hB0LlZ$R7THCE0CBd5k2M0WmYZa7Fy)y4_vMpNfeu{4vcoH zoZFCum<4AZXlal*V^l(36;!wvuT@c7wb*MaM>817%U#lD!U zZ3Z)C?8P#JH+jJXwK#H)1NQzwe!BY@Z9ynERTQ)1LraM%loAo62i1i&z1`_DR=8D0 z^pSJuqg>YX$4@AM0E2zXr&Vc%`2OLDJD2hG71nQ^pV{iaIEpx)*7g~jcA2p8dM2-2 zUhgxK#3tlg+3+LjDSDgb>j`WuHwag%d>l}B&NBiXndKI; zJowWz>*gk{D{H*q^UnOQr4yA)BYJ=?9U)(Z?jG2Uhs3_HJ`lB@`m?Vxx?{eXjwp>p zD%C)uS~NoFp~3Q^4!3GUjH1i20*^=o^PR7Rh*KG#k7N-TY~_{ysO?5Qnp&B5PmqQi z1|ngn_;=PT$nn4euO!-H*vkjs#$*SL?teTL^kboLzYDD<_!3u-aQ>r=Q(?X6RP#DV zR@%Z<%0%O&CnY4g&ou1;$0TNWm^=K-f>{#$RMGiNQlRNr+cH=4{Hg_msd^2;nn>nd zvAjo}^~T%qKcy)CoNZ`{x(uFT4;0u*@VsdYaTu2TD<{0fGMz>+>~FWdi~1~_ulW?1 zPKh4Of=itI`6HaSSI#SDMk?L#aV;Th7`}J070^r39*RJJ33z27?Bwk{zFtwU`h=NO zb?$9Mvq-)G?771z1<(J^s>~wtr=1hi&apy@)5cAGrM;hj=u1V|=t~qeU;fEN21$W2 z_Jfkij$S)xn1^kX#sbnq6=5zQR_s3>`csxIhm1m-P5CXb(cZIY0)W#ve@vqjHyhD# zI-tLJCe1qZ)0A%-|E`p=)4#Z$5@sO}@Z!eLHGG}d+*^xbGDn_n2zK1UZOxW*7sQ>Q zsi|bHV!8ClE;P@TfkrrVQIk_#co2QEEa>2k6op>??sFhkDfDh=*}Ji95T9~)lD%9M zDs!=mH8y+vSTA<-iwAwYD{=zs=BEGFd_X*=6}aHVGi&+J(1|%V{oegv5=k7I&3%kP zZsy%{OH)`f1yy9(uWk6u+CVjC>_FBu2;Y!+j=)Vd&xZu2J{l~E6n$B|y}2h!;6~^_ z7O)8Pf?Qo1Gk0NlVOi4y!|%E zEwM~F!z`sFS0M0wSRI}&BLb^OI_3IPPaodl$zi{@&dAEibr7j4HlRjZe!p`*P@nI)H_$_BgSc1V(f$l=2UPu4i-26r z(l5YCfbWPt$q|84ai=>V9I|PU-5id&>C6dZ%*ERuk@?s;{`i@s%*X7qEzd6nWbh^3}CiMaQ1Ac9I~fX3psQ`j4$BI`KCf9}h_3ab{)FaR?uorljwF(#Es1Om`(nM>*IMGSb~|W!~Jo0x?CP95!C!SirVq z&CKU+dVS8R1%^mVi#M1|w*w2Y!uf*E0`Y4DWiiXL`?G0ve#kF%M+9Eg58#5fxf}y)eQ@CDy`%P=YO?i-7U5k1W;A-1sE6oqO^w3OC)Fu#=_6K? 
z+Q%v1loYI`M$*1iu7}FLH-&bLZL1hCRa|YkznJ5w$9ZP>nbc^~n;Gd8gm)&n?CVVZ znPF8(uQR?F0&hHP4?4-ADUX0AI0}LNXN%jU@-sasWs7NgU>VNjO?C(gj%B94BWsYi zibF+XKFhzeMw}}HD@5~|Hv6_G%#!hoBgJ#Y%|QvN@Isn`d@N6#D?JfHy0I0NHSisO z-9H9?qx6USne-2K@%_|Q=y2lEIN*t?68vu^u2-p$Y~L=diyd#7=E5cMXYA~WfyC4p z$)Nhpuz-$hLFUhFF1tnD#nn5S(zJ#1i(U-%Z4Y*rR1&;- z>r#p}Gn`U+Q=H;$WyC*dUR2C1dJ%N;?6&qeMGeEi1J=sIvPZPiof0y>Dz!vOnUu&S zLT{`XV=__bnKin7F2IM|4%{agqETB{4ltn~e`YBKf+!y&LwLf7>)o7frp$ACqL;R$Rb`%Ae?# zja%lC+Vl5TO)psimaOQR6D%iCnDwiSgxs?zAy@${cY;+`D8;O{iI}_N!(#iHOqS*@ zg)MSvjf_*{$J^-k)KPW;#P`8<_E4^oQWF@FL*E3rftIuTE_U8!Hxju3VkR1$cqM9i zbs)hR>^TeX-8^M27Yx%P7@drKtN`iKxuhxdHiwEARhj1pfP{s(aEmiZAS$PsZU*$u zUH2&!`E86iS3UlR`5GExxnP)ejT`nCgm#YzW`gs`neCk0rapZWc7lTY6S59C12xFQ zPNY5DLgBA8!SW0=(>lXv*cSluq&|4GeAe!rDHJ6Zvw~{HPkekD088+xTMIHkNumNO zPxr~x>db>eamST_kVWiYtJM~^{os01N-V$(zqHG4B?o*6xN#|IbK1aEeogR(PO3FD zRQJyi+RC!{klk^>`yidkfsHc3{hZskZ7C~_eeG688t>ZR?|L2QIsf>M;QQO+<%-uA z<4+)iCYSWD9P1Lh2!-l1qrc)9KSjx?c=x+w{{p@1MWXi}IwmGr zJ6n3y3WNDkOF&vQlm$OP9 zg>haBN4BxwyrcXZ(EaPSf8AS61@fA9sO`Tn5Z>ABwC^7uHwTU}M!&x@vEuFPsu`ZR zQBoOG8n`SVl{P0KIUI-EdLG;g?fF=h zP-ucO?sXoXPK=R*J7&RYe2!=VJ|>IWzcgK^bf);VmUP8ip}l&y3$@Z}Du2Lt!mXzb zkHT}3x^nir2S^80=xe?KG420@x=sfd+V0CfYug>!T%p4spdJ-Y2vPh9avyl za}gr2#>V%) zeS0T(2}JMTSp$1?b$h`Ry5A30&d6q|l8KbXQr04)rRWTqJ$>u4GObHDWIxFyjhZ%m z�eVOSx|zt0&vGTm81|0(ZcP%s+f7c=4N)t)U=?us*{1YFTCb<2t8xl_9(qN-{^v zbC}6JE~7_8U_0IGK~;}>D`pT5gYbUS>ntD@qdffN=)du=@1xNY!R>E_^Y^E-#IYoR ztgJRuAchXtlFIDUw^3nj1J0%UCQ0Kq&%gaT35al|FU|Ox12#6(U#B}1)-HzO&N&#fUqog4jCF6%c7KxB z*R?Je9Lb}_Khe4%OGdbA7EHB$y!UTZIS|E1gi z08Rf}yg)f^X{P7DuwAr@h<{A1D#w2WH55i>tksD5SpFT60Bx#X-O2z6w^AW?@_YOp zw*c=&ciVQ=fU=jr4tJ*7`jB(8R%=K+bBc=F3j#IjY{4h)HTv})6@F{@TfTyGhrCgc zuuL8{*I&o#0Rp9p>pe6;0V0g#%}0K zVDZ@^4CTUm9mp2XrD)oGGeJbfh;znDR zR#b@5+OmDx7Qh9lkA*(nph;8H;0v*wY+Ppn;`L{WIpxm z#^M)hGl(O-+>!4HT<5a4l_(pnZto(6Ahm?e?8eD5296usUe-!&4)6GjAZtzIHiwLg z2ahI<(qcm1#=OTt{Fj0jdeZ3RWktsZPwK>~vH#o)Ps7wCk7{OaCnsaxH%>n}cwI?O zAZvNNa<1WUCpI@#QRp{I7D&!9W|VIl`lfbbYh#yC5Ag1mVwg8rr71tPfc2l1i?qqI ziH_oOZVuTF$t>?h+c$tN>+7UNwvhYu4s33IE*{IHHrG!0n?A1TtV#9gnkW7pMpEuQ z$4*<&Y7=@FyVh{uO3iE|eXd>EJZS3Sf#!)XWz*FJo8|Z$I3IR*ZuGPx!QluDez*ff z?_&M$@SCs!;tcvt1G9L;y+@agv)GTSgt@8WWkB;B&k473pVFr}xcB$^a%zEGCdpbs z0yM-c(0$y~?TlpdY^U;yA05Z^up$?85<~EoMH64x?8q7r(`2$G#@_0upZmt;_bFJR z*>f*uo#nS*Kw@&R1m{hf!THP81NQnB%b!dB3LU2h2Av*L`$P%1(; zB`QWXm@Ws2cl{DFYyu`Tl7{@Vdp6W8Y3{C|lB)&HYMilwVbQMHjlP?=Czw9%w)07E2{f5{h_j0<@@zn8}x6teg*!x<;CxO%?e`d zfS`sdj9E(2>=H)?&pt`uitA+?-k97Rfw^KI_~oJhrqV3k-8Mu^SPPJTj#ZbStZs&d zKUlNRg^^gxt`1uoKvUM&b{5^`_v z=nkbQiy}=QyAl}$ujAi}9(^pgy@Ijw^=B|@pmxM*bJj;4e~gi2e;Z<60B4i9=%b@0 zR2tH#rx0JD*2!u#fBHY<*=U`(pHYSwZ|LhMx#E)kB1Z$YB%PSDP(Yj&j#^t@)=oJB zD@;jOIC{o2SD-;XBc>zfGroEif06jyag#YI{ze?#;%Mylqhwh&Auf-c6g$AYFHji% z%@x{vVc1bgj)~oNGnS$vG0YFkE`ouqtP7c>*R|N&pGXIP#ecc|QyX6VLGU|YOs%VQ$Dr>;>tVN*N=vyEH6faRZjEdaAY9hc^ivQ1 zCR}EEaz`f37_M#3zcY*gO&~lc{9r8nD50M3WBFSRYny$BvjV2{Rc}n)t4H0m*CZ6 z`h@mFmzvQf=bRbcGqtE$-P~09b}nb? 
zrnkrK?;*pbf^>Ec^O@TWcZ4;m|9LcUrxdR zT7gr9d`QD^q*Ni8D`)D9pSgMMH}x>2*-J*Tjev5P*V0|QQ1i56@OosoI&;!kgu_8iVPRRe$-g|~MwYBZqwq-{|K#;27LPY_Q5~L~wM5RSQkS<6s5$W9u zp)V9s`U0dkK_Jw`28aj|NeB=~ga`pbKuQ9kWsjh1J?nWN-|zU2W54^?-uc51lF7`N zlR3t?uj{<-snk0Wom7!$b9I!+-j(O2VHM*6TGiYzGJbhIkDl)8KRi)GEq6d5zvIGN z1Iya4C6bFQrScqUh@p>xLfuQQLmH6WLAvDMf>{X=%+Lt7k)vYAr1Q$(^~lK3tIYXIyS&eI~4AfMRh*5ruFghQ|)?9qDtHRS8U4N1Y zVd%p4@Ft?F1zV)3Lt#ZR*E1?8{LUFDNA3Aph*85Qr|xS6uT+Bw-lKIK(f zlc!g|KetAdSV)YsQ5sk?5Sm&%poAMf>zgQ+1GT3q8tmP_cwuy3Jr`SZc;N@?b{ zoeqW5&bfV*-)-DqmA~f&-++@EGC<5-7S47@qiNL@SGBQIRq+%cq|tzo#+Cztkk;3Q zC~}T@mYAZEthJ;3z8i~@&PasnU#f6Z85-sD#-{-n!%@&{#6n_&Z1fXwn$gTu!bb;; zJ8RWIa{J&LwViE{7a5Sk$B?(x%$f9CSsUp#`gTGC5lv_^9Kopnpt#bVm@?ev_ga=O zu?TDu=Ccg2kmC0OIFduu&VJA^7Mby1W>`i*N1E zAHx$wN_CPILGkstTOwn@q!dv?8{jGhOi!c|{{TeE0Ec|xAMgoxk5cmhU8^^2wb(0+o*g&_ye`P zOLAIULSa(+lY! z<|ap~5NrXpxwVxMaa_@$L^uPA?N-c{IM|LDj3@&=OTZqwcJ0}L*Ud+B>%8=!@#03J zCnH`D66XCEL8;_1j<_xGd&+A>{Tv&j3M5`vO^N#Q2C-u9ZMk;IeRp9nac8g7@sc`N zlqqz`mtK%0++&VT#WfEkV+?WWmoEd&=Bx+tkCHk!A#T(`lhHP#cb%pP`TKw=z% zRR-5mRvoB`@Cd62!FQ_)t$S7r&~ORTsQ_ z0<0~D+Zcu-`Va76XG)58j&Wy7!NUl;DVK}_u-3bcw0B!{?S2n5#cOQ&EeObOHpWw` ztx0iR=jG<4tOEw;s4J5xD8`^<1#0&5qg(HD#}gy27jeU=?gP>H7-@9u-Pa76`ZT7swgA3z26JKk{iFsbU`sCU^Z$J*q=DMw* z*9p1(KC}JKu1;JERsocil!DcAa)Z&d*-=FheahKGgPLfQ4O20`@p!BonyMK~BCU)+s$j2n1C@v?B%)ACu<|Edg0)`0OynqIT~cQ~ukhKxzOXQGyHG^0d-J z+^sQuW1tvRuokXmhZNTr^h7C2PO@t|O9RjQ_~Tw^{t-4a5Up)st4*=eAI)$_VuS=!pcHPJifA7*4K*o|rJ2eY3x zR@nkikYHDuE56JW-M2e;#V6Ay4qs{G6UZYiHYInBrNx|;)Bd4$v5R9u5S7WDJ4mAlf^&8b|eX2sh9OFa9rjTrh}vZq`4pn1dw8ytSh)B1#ba zmBj&P(hgJH2YL{V$ENB>nE{m}cmH*kY+he%G~*Ne$R##?1=498gq|upE>4DKh;Yp= zx4YGh@YI6B7qbCqZIb`e+JHyVJQZ?5HYl8n7eBdK(Vs*#MzA(U=R0L&*b360BBUTnCf(}p*sm(hjV92Rkw`&J zrLa{3$z}+2a35&^j`w&7(OhOzocc%Q@+Ypvmr%5eQ)GYDB1mL8RaaLxRjeIY#;)mY zNdak}V8H)=ZBka$w_Nm(rz_78SNf0=TNQF$(*kg&W;o+5K+gK*yG_OQfzY(Xb_i#s zY&Adpt6Ki)#3F&>#u7wuC@ISxgCdXRX|)pXqz_h5v%(&wNSd8w9A97@j#BQmSQ7bi z1xF2BcraGCMC$@kJ>}P>4)~38@Go)i{Eq<{mT5n^Wy^`A%g~D^VB{Nb1FJzu@N|Dw z@ReA0vGc}no=^_M7&V)Rsx^?;IGbp-zDsc?9g)$zksAGTh2sr;5SNhE|hryY4n-Rk+;!#d-99Ft}C}*x~@Qh zAJWWIt_)Xw3req}U4drq_@y8rUZNEE-Hv-FJA}e6v?8c9N1$V%O;3$v{LNGD2uuK2 zv$)*emgF~OMQ64Fq}`#yQR(el{(mxkf5B|eVv0tA7SvjfBnqgDnu02C`F=q~>R|Er z)V7W@E`4u&Z7~xjv4Xa*l8WZs18Q7rTh8uPOjUm~uARgAt8aVt5}-C$u*QZsj0KO$ zn7?(G}U3Q z86W8>4_%rik%mT>x>aVa<{7A{n3fO*Z&Vt8P;)=hY|dn>9Px`;qW*4Pf2^xoNlj*7 zeD?5X3%Dh`BxTDr3O($=LVCkiDs_6G1D6Ys6bV0K5Z=j zmCEM%kM{2FrCAm^l)a@fg;YQ&{k#avA*K1==*L}%FCYDrU9IW_T1tXC z{9n<_Kn-oa%k$BZ=tj3+>OBcXdvhAKE8iCJX_G^>dp}fjDSS~mq!8JWlT^edCpCl$ zsPw`S&~ft}`2{o|QA8(L#9*XwC)F!73!(Lol*SPxtQ6 z%SVdSuzax6b(cERh;dhL^{#UbHUM2hTBjl!?f;uITV0yvgn7G|6mKg{>GcfD7g^HA zpXyKga~=7v9NtqJGpUWeg)PcYco{~0D$j-FzQOBNNNj^Vd#=3mS2sU!V)a)!v_3IZ zzo8*G-e(Et*mh?7uiwaPS6H9?@4(u$>fg?f{}5S!0Hr_JtRGtW|DYG(|C)ot_vGyH zb_$01!rs+oz*lIy|FRvhf2OnTB!FuM1o!{UI9q}lDTe!m0gnu7c~lbs()<4T=dAyR zRO?PgX8oJV=dZW@8T*&P1$#JN(mI7!wr|Tzb(Gmqz&dJFcU=!)WYpND=~u>Xr~q5< z^^h-)iPIs5-s@qc(rld2fLSwTW%Z-fc5t{)QLhZcUoA6P^oCJq2vG#69O<8yP$tcD z_cWwzY(E(Cq1UDgGbbHVsE_#8vs*Fn@8hc|Chp}S@L`XmiOp6#7_^1PJz>f&I8Un@ zhObr1wiF!X!C3v6F6>7}+P|!G=avvFwJaMRv*h4V){zLJGuphuOv7ur@Xo;YVzgNW z=awcOZA0!$PY`YoTtr}W)$VSBKR?#D6cqdyDYiNYn>=P`%K}ZZl;}6492Jk$b-JaE?vX1;bjKRrSWHp8S zOs4x(+B%XxE8ScRM4@C9&8=}@rYoxRl<~xXP54?qM0AEU`d!v>RgwPuo%)Ma2H>p- zwB!S{Uaw0%-!hkYTcfl4xFeShU{-Ut*XzgMJlPq6(dyTv zskCnU)u1{s&>KNa^gDoPX#*inXmy8(G%{Ui;WY@VBRCE_NKw#Dl%a<~+!d`SIvv%o z%|u0ww^CoM_mC4>DrIS*muy(3y!y3Wz}Oq)8Ds>?>0ki#WY{rv3}NKw+Teho?x_AA z>cGuGCJwVr*nNL7X6@xkBFir1z~VBuVStuj;r5o$ie*}`@Z}$w1gYz~Smkj4T$c{1 
zu|>;|Ob$@>>^xn6NPnxe6psNJ@x>L$80M8S^B0RQn{T?8`q!3y=T`p8pnhIg{fh8ILON_3qGRtQh0`(n0&Lv4Mcq>ZEcccZI=h#u&i>lIzo-B;|d1s&qOGa8Z- zUp1{%CD~TcU8lNImp$V%wsPUhikYD)<@$m~L|K}QvhYbLcA@p6S~x?Mi+6k% zSFIz1tM&!=Rc#ToP7S;8%FvC0`O=gVx!dkKUI7t8UBVH&baH|Neq$4`{iozVnk32Z zxF++sueVl@KdJHT-ZbM;M>UID6wRwNI;VvTXNh?RZe1;&9qDNdEAIf2owjOLL(}iy zM&1gGs$wR6SBCIK+4?bVrmn~A_cD`(Eld?^c5=q+2oGgB8ODR?`)|V85UISquL7D@ zoo*aw%pA5q)3aNhOKa-jvGL@>Q#(b~Fz=oh&|6`G#cvz4=o{}Z6uRVB%AQp?srj_G z1s`+KD5q|~D{%w$Gpa1$m~Wi-YOw0DMtbYyZQ4|O*vZFkD&8iOe>yn?cVz+)e1;>3 z#eV^0uoMdCE`^+xJAYU>D%DA=+WC+|z;bXM7)n;{+QlPFEwfhbKQhX=VG*Wlt&}6t zd*vP?HsP73{K^8lb`J_MxTk8%%YEmsO~=0JjS~T3Wlk-2LP;f)G|v!vQ1PY>s@{BJ z!GQPhyWLpjU3UA->& zYUbTM(1IU-5LzP$M{%m7MjkTG7xwP9MMt$sjH~A0Y zx$``}_AlJ(Obt2j6+vE_Qd?;dWxuJ1xyBgPDsBMxDyUsrRaM9xR2uL=Sp}SF9D~V; zloqsqraaONSx@t9!=GEs)M(iV{2*vQ$V?DF>Q_oU<>Fg=-%eD&tSnUs<~~10W4vAR zXc;xI#YltZ1#2DyA2?X$!Nbe1E^5ea#);8I?LeZQFpM=ix387gR#^-BhzU|%1`r5~ z`2m@`afo}t-c{deC4|NdYRaJ zhm9VAt}s_^1@%1F)`-tyAoAzv3Gj*3n<`wjE!MNo!PE2z>{9Iuveco|Ep#b| zg^+;`gvFyvO|*k1QC<|n*Gt&s8HS*Qurcp7GTIWZ9$FtdBkHpfRP3;N-4jnJAJ!2u zqT_TPt7{eo)tmGbs*?1-7mJ?$ncs$!)wy_572?KJ-7)YzwOt%?=#*xv4E66mF{fp-QcAed#v3#}N(m%9^=wsaoaAMi?u{w-JOj zl$ms?(0j8Bg%Jei+o5B*bspfENegsXG}$7&654x;t9Bt3+@wu^Ge8dF#lrUlZXRd* zWv%pr#Y^rx_`BQ6p3jZEt1P5ZN?03oY4iY(UFO7f0O)+mbo-XiJ;0-WVn`7gu%5QT zbmdue$mia2X~|uXK$`}({Pyj6uhckOufZFDn($_rbf`tAx_r-S-wo%bDvt}id-FzK z8SzTeYpo2C3-Kf8pqTlF@yl0jpZ6+~$*B6&cC0#?;nQ~C=A3}yO?KuG>0}H>6ok)B zBhq#t52<{_$|d@BQJ;Rpw>BA!D5vyl-NN*7t`3AGNP$s9In#7I)JK;ZO#*4}vLGYB zb*yC|Zhy+}@Sa>r?quxjnogs=r+a{CL0%p4l?@y-mLkWxAu9qSoyiJY11`YL`3DsK zn)7F!S;ZmlBq6Ymkk6@EYn;_p$gPRa1}4mqm)V6D+9sZugw4sk|A?dau;$I^!hrKgP1DUxTCkx zP|iT^`4GMRqW`l?SlSM3f`r%eFU3#zj0VMmHZN-JJec?Z!Ebl_)j(`nC+drLZq|DB+g;p5?8 zw~J;gykzg&%b54+*%Nt0IfCIgs@NRXI~Tk{$o5AWuGI{-CC?5!hnq{SK7dT^iChjt z{y}Fhbf>RpQRCC*(w^D|Oo7Q9G||3f@R2Mg;f(Im`2v_}hGtX#~l}Qls z1kF}=L2@NgX`*^NOzsZr|9^5az=D5(%WHD=o(4yb{BvVNJi9B4g7Y#NG}q(Ssb8JSmN;Br+QFG@B@xNKB5yN95C% z>&})bp`>|-q2cT(0V2GN6^8b`z&i$dLws%haD;3aDx%Ya!;Ae0PaCv3Z`jGHZ7BFe z`CbVz(mL#9@`0|aH_qg$gXpof5~CH{DF~dd5ZS8~agJWqx{&&?HrPgWMzyh(dJuzD zn;92!6Qr+QsO!?*{ctd4zy>OAL|5H&KV?lJ7<77igo;v7zIM?su4;Cn$Na`2bj0?t z^PeymRW%sR4J2ne6IZQrM~%B6+g97SrB4lm=RW6apg_tZ{0vjeL9BdxVtAPC?9~KT zeVqGty$4{~1lrYXvAZ~39%#K2PAP>Ir8(;q4muuTH(lAn_dqmTx^Cm^xIffQN9bOO zV|GfUKrBNo7}6K zZ}Z2sgI`nypW5@pbVj|NDkHtm<4V74IvdMjaTwy&aON-!fB(L93+;CJH1ND1$x9Cq z(fTGw9sD|=+UM@7CZ%$FW9215?{hdKQmu3GLm_A{!pf0+F^HLsJ2^9%g0}FAQ@tMS zk{YP)n~^z7&LZzyn2sLm02`VWCa-Mrlxq0syR1H^+fiCzw#gPMPp=8USY?( zdk;81lbXJ_eT=x}%oeWNvG>v3t*LUctX}gU;0b%peKS0?@kmk$MmKCdQNyOV9Rz4^ z^BcGe2I+P**MoMGkdFK2`&GlZPW^fN)_6NCB*1X;?dm)r3^O%$^U#SbfbCcjaal>v4dZ-Q&6 z6~Cq;?2?+aS<$|@7%AwrI}es;W)tLiBJIi}pdo--RV8xUSq?F`Se-kke}=B<++%O= z!L+Elc%&FkWDXqE&llkg-^rdTUtf8CI8!93p>Z=A?ZXY#`8|)+wbg(gftg=a-_Dz& zf_6Ch=TvjZCJiF_P!b`TVX>l9*!H2My;BXEn8A%2dq&w^r@O{88jzoIqnaBKRE# zgeOsTUU$9hB&Zd=t)Xd~_=F7eOYy}NfjJzQ=MJ@`7{dsj`9gzDh5fxleR#p*!CcRS zu=ZcEF_v6@2+u@l?TBv^2+VrpYY#0K_d28(BJRZu4q2-0T-wpUo6Aq&7f^|p0Tf4e zvJg~05f3egH>5wr+hHi#gA*nA=^GV;;R4nTnX`;v{mC3QE8Vitt#TrW91n@~s}qGK z@~tvVA60V{bi4H26>}4is?{@>5xqR90!WKJm0_L9t1lrOjSgoI{qjrz8T8CMgv$?J zh1`>@c-?8_T4&^mu|*m@SBCv_GxW{+WGTC#kKIe3vCdgZH!4YEP)tT;RE|3|FX&-2-&u|V=C zK&3R5nx{%iV=jq?)v%!(o%M7yGyP@v7jf9gaNiv6ePyIbD{O7wKiAlaT*o?3G`_uo!6=UyH+E_d$@I?e+ZX%nG4cGru6;(+ z-`X=IQo3|OA1!Pazohkg_2PZAS86|8veh2eCQM>>UKro*H+Ndy-3xl?s8`sJo`Ji+W5ySWQA7fLsELssHt4XC7i&{OjsRFK}TU;Hm2W%|Y_6Jkw@p zvNmJsZ%6ZA1Q_`DJ!w_L4_^;$)m!au!Sl}kO`*603T6GHrx_o9DCC^=Ocfy}z%~Fr zC#=E&269d;WTF-y!}*LQ-89Ag*+-*{!b7~H<`efyt5Ws; zQN_i#_A}y0E6dQ8-y*Uo`?67W^V}MLgzJ 
z&>(}|$H-UeSx>XO#=2&M71IJA-WYhy3w~iXW4b;jOCmvn3%9dJHFqi~gRjn;4$Ai0 z@AUx{LIG5WZ2;xiC{?4eMTymbu|8&1bKUHTT(`4yWhM+(EU++N!zqg{{5m zKG(s>WX7M+=$EEQr?>PyK=Q%Um<+$iWUT)yN6KjQJX~7axU~Z?J`t*arK!asXRb-s zsYg@?N~cyJRzkg34X10&VyFq@y(Rr@i@04P@#%);L`7bvBPtyKdJSy8JR26w#=D^ zQB1a~t<}P9UThf~NYUvGCIoKiHg@wyVNNWd1M4&VS_?3!YSkJbLI0xCn!a#kg+=f8Omlu-so4Mql;vG z)g*r{=UxwG(TMd=%+5)yq+IaLTm^qxpwd!BYK=(!@%L2MPJ`b0hir+sFg$8vTOjF&aM2nTPGiL4=#O zev{Yk)Nuu3=s5l{h;Vbm)~TDXi3Me~eSYIfFcP@$J#-y4%q&t`se^~+E%O;_SP#vu z&jyUUr;WR&K7C}uc3FCe^e?VWjdlYJ<=9XvI)q1>5D2N;SZ3_<6uY%F+W-^_Z@VVj zh>4uhTMMD18wWgO0(>>h8sHCBO@G5qCI$Me$74*ZrUF<15Lyw^ZoO8_4hgUr7hZ}- zlS=H8cho2+w@|}Rlw~TLh05;e&wBpi^S8*7q0NvzQ4!EMfKy66xY9vsCQ0|OriI^z z54dXb*CG$%L!=8t(ZFPCg};sgPW+;|Kn-J5g^g_vt^!}Hkt@ND^M)gmaSRZZKvuxb zU7pq0_PIt$LpocAyR6dGr`ezMaAN|_NQs< zb4&CZrRj8be=t?4miLg`iC0bQqjJPSJ?oI2VkLfTsRQKr+c?|txV_phHghPt2(hki zp1}c=4cDgxymNviG)h!`Ngr>{LKL8u(CS9vkH5pP`T*CE}E*rBF_E!2Bdw=Jh zoo)D?aay^4xFV+ZK@R9K6;C5eE_vRvw6#6146ok{G4>iM0el~%a<7f)$tya?Bu0v3 zt`c{*5|d&rA~?AGCITHk4f_8#@DSCx`}bbd+8y3&ye4G%(tpd{D@y3o@`3X(1S=h$_E*@!QG1zUykWyAvxL#?wF(k1c0n|2ePPW&Wcd zPPHha$j>G0mdla%yc3T=jpDSUz!j=XaeXQj_Nb39#?Ur>xDB!|Z*TQngH3w{i9gpx z#JU`R7@53Ub7Qj$fNWq|Pac^ISH>3y`j@n{cvh;*JOz!T2yt+MN%yrQe?p$jI}T=Z zWhU`;HBWUF`VMXrKXnlABz(wF5}Lw-K09_!QlJmha>3R91>_K5z#?IP9t&o~UEvVF zzJTvVVLQ`540H$ByCeo}5f;LwB|>oyOLR((`mD*tFM{_3TA1HDZmZaWtBUqtscEvJ zKIF=egK9$^4vq7SdR6C%=IdM-k1(uF&}Mhy$=qH4KHQH^wx3FN%q4gFzjfqR?j1`R zL_Vs^ca6;`?fTzhmco8AOEhxDTZy0W=CkZMUv+kr(CxDrjxtW6EKnhX8g zss}(Wwei{B7Q7c{gSyT<>7wrZGNp*ZAwZmWg_+D5|9Dt+{1Y!|mxz0+dnNsTVI1{z z_4Qt!3sIo4Y_P=JnV6&^#pIMh%T)%*Ia2>2(EQ+^IC2&5M!JsFIn4$63Dz*W&MZV; z3l#rY5gJ+Ub@V+5jG=V$$}9 z@k8jKNc%DK3`+^3Yv!44(#^J6-Tsctf@jwgCmu6f7#=cdEYAVG1x8SGnB^@?Z`-$| z)=nX+Q!zr0hgU%pw$y5j8d9=+*Nu$%skv z13ZV<4Izzv#tXNa%ge33j%h)qKCa`d5`?vsgJq`@?H7eK!nKWQiMba0qm>O>TS+T# zAB_SxIKY@rOH4JkJ2ulAMrzc@-LgxW!+%UevC9hKESGhsG;4@5Ettx055@lfLzs#k mqAQzRblDW3TiB~**4FQ*&m>RqySlNrRhP95pcR^S_x~3s0g4p> literal 0 HcmV?d00001 diff --git a/docs/hyper-parameter.md b/docs/hyper-parameter.md new file mode 100644 index 0000000..2b4b443 --- /dev/null +++ b/docs/hyper-parameter.md @@ -0,0 +1,101 @@ +# 镜像超参数 + +## ymir后台如何获取镜像超参数 + +- 通过解析镜像中 `/img-man/training-template.yaml` 获得训练的超参数, 若文件不存在则标记镜像不支持训练。 + +- 通过解析镜像中 `/img-man/infer-template.yaml` 获得推理的超参数,若文件不存在则标记镜像不支持推理。 + +- 通过解析镜像中 `/img-man/mining-template.yaml` 获得挖掘的超参数,若文件不存在则标记镜像不支持挖掘。 + +以 `youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi` 为例,可执行以下命令查看镜像对应超参数 + +``` +docker run --rm youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi cat /img-man/training-template.yaml + +# 输出结果 +# training template for your executor app +# after build image, it should at /img-man/training-template.yaml +# key: gpu_id, task_id, pretrained_model_params, class_names should be preserved + +# gpu_id: '0' +# task_id: 'default-training-task' +# pretrained_model_params: [] +# class_names: [] + +shm_size: '128G' +export_format: 'ark:raw' +model: 'yolov5s' +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +epochs: 100 +img_size: 640 +opset: 11 +args_options: '--exist-ok' +save_best_only: True # save the best weight file only +save_period: 10 +sync_bn: False # work for multi-gpu only +ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern +``` + +注:同名镜像在后台更新超参数配置文件如 `/img-man/training-template.yaml` 后,需要在 ymir 网页端重新添加,使超参数配置生效。 + +## 如何更新镜像默认的超参数 + +准备以下文件与对应内容: + +- training-template.yaml + + ``` + model: 'yolov5n' # change from yolov5s --> yolov5n + batch_size_per_gpu: 2 # change from 16 --> 2 + 
diff --git a/docs/ymir-executor-version.md b/docs/ymir-executor-version.md
index 1c1c30f..f1dd796 100644
--- a/docs/ymir-executor-version.md
+++ b/docs/ymir-executor-version.md
@@ -1,11 +1,13 @@
-# ymir1.3.0 (2022-09-30)
+# ymir2.0.0 (2022-09-30)
 
 - Model weights can now be saved as separate files, so a user may run inference with epoch10.pth or with epoch20.pth.
 
-- A training image must declare the dataset annotation format; the ymir1.1.0 default is `ark:raw`.
+- A training image must declare the dataset annotation format; the ymir1.1.0 default is `ark:raw`, and the ymir2.0.0 default is `ark:voc`.
 
 - A training image can read the system's ymir interface version, which simplifies compatibility handling.
 
+- In ymir1.1.0, pretrained model files are placed under the /in/models directory.
+
 ## Helper libraries
 
 - [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) uses the ymir1.3.0 branch
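The version notes above mention two layouts for pretrained weights: the `pretrained_model_params` list seen in the hyper-parameter templates, and the ymir1.1.0-era `/in/models` directory. A version-tolerant lookup might look like the sketch below; this is not the official ymir-exc API, and the `/in/config.yaml` path and the fallback glob are assumptions.

```
# a minimal sketch, assuming the merged config lives at /in/config.yaml and
# that a ymir1.1.0-style layout mounts weight files under /in/models.
import glob
import os

import yaml


def find_pretrained_weights(config_path: str = "/in/config.yaml") -> list:
    """Return candidate pretrained weight files across interface versions."""
    with open(config_path, "r", encoding="utf-8") as f:
        config = yaml.safe_load(f) or {}
    # ymir2.0.0-style: an explicit file list in the merged hyper-parameters
    params = config.get("pretrained_model_params") or []
    weights = [p for p in params if p.endswith((".pt", ".pth"))]
    if weights:
        return weights
    # ymir1.1.0-style fallback: scan the mounted /in/models directory
    return sorted(glob.glob(os.path.join("/in/models", "*.pt*")))
```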
-
-  - [dataset convert](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/dataset_convert.md)
-
-- [ymir-executor-verifer](https://github.com/modelai/ymir-executor-verifier) debug and check your ymir-executor
-
 ## how to import pretrained model weights
 
 - [import and finetune model](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model)
diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index 7c23027..a2013a1 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -8,6 +8,8 @@
 
 - [ymir镜像](./docs/official-docker-image.md)
 
+  - [制作教程](./docs/README.MD) 从零到一,搭建自己的 ymir-executor
+
 - [用户调查](https://www.wjx.cn/vm/eKFm2aq.aspx#)
 
 - [ymir 挖掘算法](./docs/mining-images-overview.md)
@@ -105,18 +107,6 @@ docker build -t ymir-executor/live-code:torch-tmi -f torch.dockerfile
 docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile
 ```
 
-## 如何制作自己的ymir-executor
-
-- [示例 ymir-executor](det-demo-tmi/README.md) 从零到一,搭建自己的 ymir-executor
-
-- [ymir-executor 制作指南](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-dataset-zh-CN.md)
-
-- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir镜像开发辅助库
-
-  - [数据集转换](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/dataset_convert.md)
-
-- [ymir-executor-verifer](https://github.com/modelai/ymir-executor-verifier) 调试与检测 ymir-executor
-
 ## 如何导入预训练模型
 
 - [如何导入并精调外部模型](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model)
diff --git a/docs/README.MD b/docs/README.MD
index 8c664a0..55fad71 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -32,10 +32,12 @@
 
 - [数据集格式转换](https://github.com/modelai/ymir-executor-sdk/blob/master/docs/dataset_convert.md)
 
-- [ymir镜像调试工具](https://github.com/modelai/ymir-executor-verifier)
+- [ymir镜像调试校验工具](https://github.com/modelai/ymir-executor-verifier)
 
   - 样例数据下载
 
   - 交互式调试
 
+  - 批量校验镜像
+
 - [ymir版本与接口兼容](./ymir-executor-version.md)
 

From 90887b262b9154fda652fbce76ba0434bad9e242 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 10 Nov 2022 16:05:10 +0800
Subject: [PATCH 162/204] update doc

---
 README.MD       | 2 +-
 README_zh-CN.MD | 2 +-
 docs/README.MD  | 8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/README.MD b/README.MD
index d168006..30a5292 100644
--- a/README.MD
+++ b/README.MD
@@ -8,7 +8,7 @@
 
 - [ymir executor](./docs/official-docker-image.md)
 
-  - [tutorial](./docs/README.MD) from zero to one, build your ymir-executor
+  - [develop tutorial](./docs/README.MD) from zero to one, build your ymir-executor
 
 - [user survey](https://www.wjx.cn/vm/eKFm2aq.aspx#)
 
diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index a2013a1..5684772 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -8,7 +8,7 @@
 
 - [ymir镜像](./docs/official-docker-image.md)
 
-  - [制作教程](./docs/README.MD) 从零到一,搭建自己的 ymir-executor
+  - [镜像开发者:制作教程](./docs/README.MD) 从零到一,搭建自己的 ymir-executor
 
 - [用户调查](https://www.wjx.cn/vm/eKFm2aq.aspx#)
 
diff --git a/docs/README.MD b/docs/README.MD
index 55fad71..ee2c6ec 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -4,19 +4,19 @@
 
 - [已有的镜像](./official-docker-image.md)
 
-- [挖掘算法评测](./mining-images-overview.md)
+- [挖掘算法评测](./mining-images-overview.md) 完善中。。。
 
-## 从零定制流程
+## 从零定制
 
 1. [制作一个简单的镜像](../det-demo-tmi/README.md)
 
 2. [了解ymir接口与数据结构](./ymir-dataset-zh-CN.md)
 
-## 基于已有镜像进行定制流程
+## 基于已有镜像进行定制
 
 - [增/删/改: 默认超参数](./hyper-parameter.md)
 
-## 镜像调试流程
+## 镜像调试
 
 - [交互式调试](./docker-image-debug.md)
 

From c2b74e101a62860ccbf305001ed68f2442150b3f Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 10 Nov 2022 16:13:35 +0800
Subject: [PATCH 163/204] add yolov5 custom demo

---
 docs/README.MD | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/README.MD b/docs/README.MD
index ee2c6ec..b2d8baa 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -16,6 +16,10 @@
 
 - [增/删/改: 默认超参数](./hyper-parameter.md)
 
+## 基于开源仓库进行定制
+
+- [yolov5示例](https://github.com/yzbx/ymir-yolov5/pull/2/files)
+
 ## 镜像调试
 
 - [交互式调试](./docker-image-debug.md)
 

From b115cc0d742d89813e5942c1c8d8e972cd803379 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 11 Nov 2022 14:24:09 +0800
Subject: [PATCH 164/204] update mining algorithm

---
 README.MD                      |  4 +---
 README_zh-CN.MD                |  4 +---
 docs/mining-images-overview.md | 38 +++++++++++++++++++++++++++++-----
 3 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/README.MD b/README.MD
index 30a5292..b17c463 100644
--- a/README.MD
+++ b/README.MD
@@ -10,7 +10,7 @@
 
   - [develop tutorial](./docs/README.MD) from zero to one, build your ymir-executor
 
-  - [user survey](https://www.wjx.cn/vm/eKFm2aq.aspx#)
+  - [user survey](https://www.wjx.cn/vm/eKFm2aq.aspx#) help the ymir team improve the documentation and development; user feedback is collected on a continuous, long-term basis.
 
 - [ymir mining algorithm](./docs/mining-images-overview.md)
 
@@ -99,8 +99,6 @@ docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 .
 
 ## reference
 
-- [mining algorithm: CALD](https://github.com/we1pingyu/CALD/)
-- [mining algorithm: ALDD](https://gitlab.com/haghdam/deep_active_learning)
 - [yolov4](https://github.com/AlexeyAB/darknet)
 - [yolov5](https://github.com/ultralytics/yolov5)
 - [mmdetection](https://github.com/open-mmlab/mmdetection)
diff --git a/README_zh-CN.MD b/README_zh-CN.MD
index 5684772..1b93351 100644
--- a/README_zh-CN.MD
+++ b/README_zh-CN.MD
@@ -10,7 +10,7 @@
 
   - [镜像开发者:制作教程](./docs/README.MD) 从零到一,搭建自己的 ymir-executor
 
-  - [用户调查](https://www.wjx.cn/vm/eKFm2aq.aspx#)
+  - [用户调查](https://www.wjx.cn/vm/eKFm2aq.aspx#) 帮助ymir团队开发文档与新功能,此调查长期有效, 定期收集
 
 - [ymir 挖掘算法](./docs/mining-images-overview.md)
 
@@ -117,8 +117,6 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile
 
 ## 参考
 
-- [挖掘算法CALD](https://github.com/we1pingyu/CALD/)
-- [挖掘算法ALDD](https://gitlab.com/haghdam/deep_active_learning)
 - [yolov4](https://github.com/AlexeyAB/darknet)
 - [yolov5](https://github.com/ultralytics/yolov5)
 - [mmdetection](https://github.com/open-mmlab/mmdetection)
diff --git a/docs/mining-images-overview.md b/docs/mining-images-overview.md
index b376652..5d2424b 100644
--- a/docs/mining-images-overview.md
+++ b/docs/mining-images-overview.md
@@ -3,21 +3,49 @@
 | docker images | random | cald | aldd | entropy |
 | - | - | - | - | - |
 | yolov5 | ✔️ | ✔️ | ✔️ | ✔️ |
-| mmdetection | ✔️ | ✔️ | ✔️ | ❌ |
+| mmdetection | ✔️ | ✔️ | ✔️ | ✔️ |
 | yolov4 | ❌ | ✔️ | ✔️ | ❌ |
 | yolov7 | ❌ | ❌ | ✔️ | ❌ |
 | nanodet | ❌ | ❌ | ✔️ | ❌ |
 | vidt |❌ | ✔️ | ❌ | ❌ |
 | detectron2 | ❌ | ✔️ | ❌ | ❌ |
 
-view [ALBench: Active Learning Benchmark](https://github.com/modelai/ALBench) for detail
-
 ![](./docs/mining_score.png)
 
+# 带负样本的单类挖掘实验
+
+- view [ALBench: Active Learning Benchmark](https://github.com/modelai/ALBench) for detail
+
+## 实验设置
+
+COCO数据集中选择三个类做实验,分别是Train,Fork,Dog,从选定类别的train中选择1000张图片加入训练集,从不包含该类别的图片中选择3000张作为负样本加入训练集。选定类别的所有val加入验证集,从不包含该类别的val图片中选择3倍数据作为负样本加入验证集。剩余图片全部加入挖掘集,每次迭代从中选择500张图片加入训练集。
+
+| class | train | val | mining |
+| - | - | - | - |
+| train(火车) | 4000 | 628 | 114287 |
+| fork(叉子) | 4000 | 620 | 114287 |
+| dog(狗) | 4000 | 708 | 114287 |
+
+## 挖掘实验结果
+
+| class | mining algorithm | iter 0 | iter 1 | iter 2 | iter 3 | iter 4 |
+| - | - | - | - | - | - | - |
+| train(火车) | random | 0.647 | 0.639 | 0.652 | 0.620 | 0.622 |
+| train(火车) | entropy | 0.678 | 0.703 | 0.721 | 0.738 | 0.757 |
+| train(火车) | aldd | 0.665 | 0.706 | 0.738 | 0.754 | 0.778 |
+| fork(叉子) | random | 0.244 | 0.221 | 0.224 | 0.227 | 0.225 |
+| fork(叉子) | entropy | 0.239 | 0.255 | 0.313 | 0.367 | 0.372 |
+| fork(叉子) | aldd | 0.220 | 0.290 | 0.329 | 0.368 | 0.379 |
+| dog(狗) | random | 0.391 | 0.418 | 0.401 | 0.389 | 0.416 |
+| dog(狗) | entropy | 0.391 | 0.418 | 0.449 | 0.535 | 0.505 |
+| dog(狗) | aldd | 0.399 | 0.487 | 0.518 | 0.533 | 0.564 |
+
 ## reference
 
+- [awesome-active-learning](https://github.com/baifanxxx/awesome-active-learning)
+
 - entropy: `Multi-class active learning for image classification. CVPR 2009`
 
-- cald: `Consistency-based Active Learning for Object Detection. CVPR 2022 workshop`
+- [CALD](https://github.com/we1pingyu/CALD/): `Consistency-based Active Learning for Object Detection. CVPR 2022 workshop`
 
-- aldd: `Active Learning for Deep Detection Neural Networks. ICCV 2019`
+- [ALDD](https://gitlab.com/haghdam/deep_active_learning): `Active Learning for Deep Detection Neural Networks. ICCV 2019`
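+
+以 entropy 挖掘为例,其核心是用预测框置信度的信息熵来度量不确定性,下面是一个最小示意(假设性代码,仅说明打分思路,并非任一镜像中的实际实现):
+
+```python
+import numpy as np
+
+def entropy_score(confidences):
+    """mean binary entropy of box confidences; boxes near 0.5 score highest"""
+    p = np.clip(np.asarray(confidences, dtype=np.float64), 1e-6, 1.0 - 1e-6)
+    h = -(p * np.log(p) + (1.0 - p) * np.log(1.0 - p))
+    return float(h.mean()) if p.size else 0.0
+
+print(entropy_score([0.5, 0.9]))  # higher score = more worth labeling
+```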
From f23a33d86b59f7f3d23d6e1e998de45b27aea56b Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 11 Nov 2022 14:50:04 +0800
Subject: [PATCH 165/204] update doc

---
 docs/FAQ.md                 | 41 ++++++++++++++++++++++++++++++++++++
 debug.png => docs/debug.png | Bin
 2 files changed, 41 insertions(+)
 create mode 100644 docs/FAQ.md
 rename debug.png => docs/debug.png (100%)

diff --git a/docs/FAQ.md b/docs/FAQ.md
new file mode 100644
index 0000000..85e96d7
--- /dev/null
+++ b/docs/FAQ.md
@@ -0,0 +1,41 @@
+# FAQ
+
+## 关于cuda版本
+
+- 推荐主机安装高版本驱动,支持11.2以上的cuda版本, 使用11.1及以上的镜像
+
+- GTX3080/GTX3090不支持11.1以下的cuda,只能使用cuda11.1及以上的镜像
+
+## apt 或 pip 安装慢或出错
+
+- 采用国内源,如在docker file 中添加如下命令
+
+  ```
+  RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list
+
+  RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple
+  ```
+
+## docker build 的时候出错,找不到相应docker file或`COPY/ADD`时出错
+
+- 回到项目根目录或docker file对应根目录,确保docker file 中`COPY/ADD`的文件与文件夹能够访问,以yolov5为例.
+
+  ```
+  cd ymir-executor/det-yolov5-tmi
+
+  docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile
+  ```
+
+## 模型精度/速度如何权衡与提升
+
+- 模型精度与数据集大小、数据集质量、学习率、batch size、 迭代次数、模型结构、数据增强方式、损失函数等相关,在此不做展开,详情参考:
+
+  - [Object Detection in 20 Years: A Survey](https://arxiv.org/abs/1905.05055)
+
+  - [Paper with Code: Object Detection](https://paperswithcode.com/task/object-detection)
+
+  - [awesome object detection](https://github.com/amusi/awesome-object-detection)
+
+  - [voc2012 object detection leaderboard](http://host.robots.ox.ac.uk:8080/leaderboard/displaylb.php?challengeid=11&compid=4)
+
+  - [coco object detection leaderboard](https://cocodataset.org/#detection-leaderboard)
diff --git a/debug.png b/docs/debug.png
similarity index 100%
rename from debug.png
rename to docs/debug.png

From 961d922e656a47562923f966589370746d8c658f Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 11 Nov 2022 14:50:17 +0800
Subject: [PATCH 166/204] update doc

---
 README.MD                     |  36 +-------
 README_zh-CN.MD               | 164 +---------------------------------
 docs/docker-image-debug.md    |  10 +++
 docs/official-docker-image.md |  33 +++++++
 4 files changed, 45 insertions(+), 198 deletions(-)

diff --git a/README.MD b/README.MD
index b17c463..f4255b2 100644
--- a/README.MD
+++ b/README.MD
@@ -58,45 +58,11 @@ gpu: single GeForce GTX 1080 Ti
 
 ---
 
-# build ymir executor
-
-## det-yolov4-tmi
-
-- yolov4 training, mining and infer docker image, use `mxnet` and `darknet` framework
-
-  ```
-  cd det-yolov4-tmi
-  docker build -t ymir-executor/yolov4:cuda101-tmi -f cuda101.dockerfile .
-
-  docker build -t ymir-executor/yolov4:cuda112-tmi -f cuda112.dockerfile .
-  ```
-
-## det-yolov5-tmi
-
-- yolov5 training, mining and infer docker image, use `pytorch` framework
-
-```
-cd det-yolov5-tmi
-docker build -t ymir-executor/yolov5:cuda102-tmi -f cuda102.dockerfile .
-
-docker build -t ymir-executor/yolov5:cuda111-tmi -f cuda111.dockerfile .
-```
-
-## det-mmdetection-tmi
-
-```
-cd det-mmdetection-tmi
-docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 .
-
-docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 .
-``` - -## live-code-executor - -- 可以通过`git_url`, `commit id` 或 `tag` 从网上clone代码到镜像并运行, 不推荐使用`branch`, 因为这样拉取的代码可能随时间变化, 过程不具备可重复性. - -- 参考 [live-code](https://github.com/IndustryEssentials/ymir-remote-git) - -``` -cd live-code-executor - -docker build -t ymir-executor/live-code:torch-tmi -f torch.dockerfile - -docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile -``` - ## 如何导入预训练模型 - [如何导入并精调外部模型](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model) -- ~~[如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md)~~ +- [如何导入外部模型](https://github.com/IndustryEssentials/ymir/blob/master/dev_docs/import-extra-models.md) - 通过ymir网页端的 `模型管理/模型列表/导入模型` 同样可以导入模型 @@ -124,118 +77,3 @@ docker build -t ymir-executor/live-code:mxnet-tmi -f mxnet.dockerfile - [detectron2](https://github.com/facebookresearch/detectron2) - [vidt](https://github.com/naver-ai/vidt) - [nanodet](https://github.com/RangiLyu/nanodet) - ---- - -# FAQ - -## 关于cuda版本 - -- 推荐主机安装高版本驱动,支持11.2以上的cuda版本, 使用11.1及以上的镜像 - -- GTX3080/GTX3090不支持11.1以下的cuda,只能使用cuda11.1及以上的镜像 - -## apt 或 pip 安装慢或出错 - -- 采用国内源,如在docker file 中添加如下命令 - - ``` - RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list - - RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple - ``` - -## docker build 的时候出错,找不到相应docker file或`COPY/ADD`时出错 - -- 回到项目根目录或docker file对应根目录,确保docker file 中`COPY/ADD`的文件与文件夹能够访问,以yolov5为例. - - ``` - cd ymir-executor/det-yolov5-tmi - - docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile - ``` - -## 镜像运行完`/in`与`/out`目录中的文件被清理 - -- ymir系统为节省空间,会在任务`成功结束`后删除其中不必要的文件,如果不想删除,可以在部署ymir后,修改镜像`industryessentials/ymir-backend`中的`/usr/local/lib/python3.8/dist-packages/mir/tools/command_run_in_out.py`,注释其中所有的`_cleanup(work_dir=work_dir)`, 将修改覆盖到镜像`industryessentials/ymir-backend:latest`并重启ymir - - ``` - $ docker ps |grep backend - - 580c2f1dae1b industryessentials/ymir-backend ... - 5490c294982f industryessentials/ymir-backend-redis ... - - $ docker run -it --rm industryessentials/ymir-backend:latest bash - $ vim /usr/local/lib/python3.8/dist-packages/mir/tools/command_run_in_out.py - ``` - 注释所有的`_cleanup(work_dir=work_dir)`之后,不要立即退出容器,切换到另一个终端 - ``` - $ docker ps |grep backend - - dced73e51429 industryessentials/ymir-backend # use the latest one - 580c2f1dae1b industryessentials/ymir-backend ... - 5490c294982f industryessentials/ymir-backend-redis ... - - $ docker commit dced73e51429 industryessentials/ymir-backend:latest - ``` - 保存改动后,再切换回之前的终端,退出容器,重启ymir即可 - - -## 训练镜像如何调试 - -- 一般性的错误在`ymir-workplace/ymir-data/logs`下查看 - -``` -tail -f -n 100 ymir_controller.log -tail -f -n 100 ymir_app.log -``` - -![](./debug.png) - -- 先修改镜像`industryessentials/ymir-backend`,注释其中所有的`_cleanup(work_dir=work_dir)`,保存`/in`和`/out`目录下的文件 - -- 再通过失败任务的tensorboard链接拿到任务id,如`t000000100000175245d1656933456` - -- 进入ymir部署目录 `ymir-workplace/sandbox/work_dir/TaskTypeTraining/t000000100000175245d1656933456/sub_task/t000000100000175245d1656933456`, `ls` 可以看到以下结果 - - ``` - # ls - in out task_config.yaml - - # ls out - monitor.txt ymir-executor-out.log - - # ls in - assets config.yaml env.yaml ... 
- ``` - -- 挂载目录并运行镜像``,注意需要将ymir部署目录挂载到镜像中 - - ``` - docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v $PWD/out:/out -v : -v /sandbox//training_assset_cache:/in/assets bash - - # 以/home/ymir/ymir-workplace作为ymir部署目录为例, 以实际情况为准 - docker run -it --gpus all --shm-size 12G -v $PWD/in:/in -v /home/ymir/ymir-workplace/sandbox/0001/training_assset_cache:/in/assets -v $PWD/out:/out -v /home/ymir/ymir-workplace:/home/ymir/ymir-workplace bash - ``` - -- 进入到docker 容器中后, 执行镜像默认的命令, 如dockerfile中写的 `CMD bash /usr/bin/start.sh` - - ``` - bash /usr/bin/start.sh - ``` - -- 推理与挖掘镜像调试同理,注意对应目录均为`ymir-workplace/sandbox/work_dir/TaskTypeMining` - -## 模型精度/速度如何权衡与提升 - -- 模型精度与数据集大小、数据集质量、学习率、batch size、 迭代次数、模型结构、数据增强方式、损失函数等相关,在此不做展开,详情参考: - - - [Object Detection in 20 Years: A Survey](https://arxiv.org/abs/1905.05055) - - - [Paper with Code: Object Detection](https://paperswithcode.com/task/object-detection) - - - [awesome object detection](https://github.com/amusi/awesome-object-detection) - - - [voc2012 object detection leadboard](http://host.robots.ox.ac.uk:8080/leaderboard/displaylb.php?challengeid=11&compid=4) - - - [coco object detection leadboard](https://cocodataset.org/#detection-leaderboard) diff --git a/docs/docker-image-debug.md b/docs/docker-image-debug.md index d5a488c..a8602f8 100644 --- a/docs/docker-image-debug.md +++ b/docs/docker-image-debug.md @@ -113,6 +113,8 @@ out docker run -it --gpus all --shm-size 128G -v $PWD/in:/in -v $PWD/out:/out -v $YMIR_WORKDIR:$YMIR_WORKDIR -v $HOME/code:/code youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi bash ``` +7. 推理与挖掘镜像调试同理,注意对应目录均为 `ymir-workplace/sandbox/work_dir/TaskTypeMining` + ## 调试完成后构建新镜像 - 准备 `zzz.dockerfile` @@ -137,3 +139,11 @@ CMD bash /usr/bin/start.sh ``` docker build -t youdaoyzbx/ymir-executor:ymir2.0.1-yolov5-cu111-tmi . -f zzz.dockerfile ``` + +## ymir后台错误查看 + +- 在`ymir-workplace/ymir-data/logs`下查看 + +``` +tail -f -n 200 ymir_controller.log +``` diff --git a/docs/official-docker-image.md b/docs/official-docker-image.md index 39363d1..22e3714 100644 --- a/docs/official-docker-image.md +++ b/docs/official-docker-image.md @@ -140,3 +140,36 @@ youdaoyzbx/ymir-executor:ymir1.3.0-mmdet-cu111-tmi ``` docker pull youdaoyzbx/ymir-executor:ymir1.1.0-nanodet-cu111-tmi ``` + +# build ymir executor + +## det-yolov4-tmi + +- yolov4 training, mining and infer docker image, use `mxnet` and `darknet` framework + + ``` + cd det-yolov4-tmi + docker build -t ymir-executor/yolov4:cuda101-tmi -f cuda101.dockerfile . + + docker build -t ymir-executor/yolov4:cuda112-tmi -f cuda112.dockerfile . + ``` + +## det-yolov5-tmi + +- yolov5 training, mining and infer docker image, use `pytorch` framework + +``` +cd det-yolov5-tmi +docker build -t ymir-executor/yolov5:cuda102-tmi -f cuda102.dockerfile . + +docker build -t ymir-executor/yolov5:cuda111-tmi -f cuda111.dockerfile . +``` + +## det-mmdetection-tmi + +``` +cd det-mmdetection-tmi +docker build -t ymir-executor/mmdet:cu102-tmi -f docker/Dockerfile.cuda102 . + +docker build -t ymir-executor/mmdet:cu111-tmi -f docker/Dockerfile.cuda111 . 
+```

From d4df6b95114a4c81c2f6cb651fd3d25e7fd341ea Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 11 Nov 2022 14:57:21 +0800
Subject: [PATCH 167/204] zzz

---
 docs/README.MD                 | 2 ++
 docs/mining-images-overview.md | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/README.MD b/docs/README.MD
index b2d8baa..d498b80 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -44,4 +44,6 @@
 
   - 批量校验镜像
 
+- [FAQ](./FAQ.md)
+
 - [ymir版本与接口兼容](./ymir-executor-version.md)
diff --git a/docs/mining-images-overview.md b/docs/mining-images-overview.md
index 5d2424b..f427e55 100644
--- a/docs/mining-images-overview.md
+++ b/docs/mining-images-overview.md
@@ -10,7 +10,7 @@
 | vidt |❌ | ✔️ | ❌ | ❌ |
 | detectron2 | ❌ | ✔️ | ❌ | ❌ |
 
-![](./docs/mining_score.png)
+![](./mining_score.png)
 
 # 带负样本的单类挖掘实验
 

From e0228390c4a7a161180824a33e605fa20cc5d9f8 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 11 Nov 2022 18:26:30 +0800
Subject: [PATCH 168/204] update yolov5 activation

---
 det-yolov5-tmi/models/common.py                    | 4 ++--
 det-yolov5-tmi/ymir/README.md                      | 1 +
 det-yolov5-tmi/ymir/img-man/training-template.yaml | 1 +
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py
index 35bbc69..488e2ce 100644
--- a/det-yolov5-tmi/models/common.py
+++ b/det-yolov5-tmi/models/common.py
@@ -50,8 +50,8 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, k
             if activation.lower() == 'relu':
                 custom_act = nn.ReLU()
             else:
-                warnings.warn(f'unknown activation {activation}, use Hardswish instead')
-                custom_act = nn.Hardswish()
+                # view https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity
+                custom_act = getattr(nn, activation)()
         self.act = custom_act if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
 
     def forward(self, x):
diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md
index 3a2a7bb..d576a4c 100644
--- a/det-yolov5-tmi/ymir/README.md
+++ b/det-yolov5-tmi/ymir/README.md
@@ -42,6 +42,7 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile .
 | save_best_only | True | 布尔: bool | 是否只保存最优模型 | 建议:为节省空间设为True即可 |
 | save_period | 10 | 整数: int | 保存模型的间隔 | 建议:当save_best_only为False时,可保存 `epoch/save_period` 个中间结果
 | sync_bn | False | 布尔: bool | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 |
+| activation | '' | 字符串:str | 激活函数,默认为nn.Hardswish(), 参考 [pytorch激活函数](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) | 可选值: ELU, Hardswish, LeakyReLU, PReLU, ReLU, ReLU6, SiLU, ... |
 | ymir_saved_file_patterns | '' | 字符串: str | 用 `,` 分隔的保存文件模式 | 建议:专业用户当希望过滤保存的文件以节省空间时,可设置配置的正则表达式 |
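+
+下面用一个简短示意说明该参数的生效方式(假设 `activation` 超参数会以 `ACTIVATION` 环境变量传入镜像,按名称从 torch.nn 构造激活函数,与上面 models/common.py 的改动一致):
+
+```python
+import os
+import torch.nn as nn
+
+os.environ.setdefault('ACTIVATION', 'ReLU6')  # assumed injection point for the hyper-parameter
+act = getattr(nn, os.environ['ACTIVATION'])()  # -> nn.ReLU6(), same lookup as models/common.py
+print(act)
+```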
diff --git a/det-yolov5-tmi/ymir/img-man/training-template.yaml b/det-yolov5-tmi/ymir/img-man/training-template.yaml
index 1cc4752..e855afe 100644
--- a/det-yolov5-tmi/ymir/img-man/training-template.yaml
+++ b/det-yolov5-tmi/ymir/img-man/training-template.yaml
@@ -19,4 +19,5 @@ args_options: '--exist-ok'
 save_best_only: True # save the best weight file only
 save_period: 10
 sync_bn: False # work for multi-gpu only
+activation: 'Hardswish' # view https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity
 ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern

From d7cb1e9e16b3bea9233db494ea9cd6c0a44160ef Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Mon, 21 Nov 2022 14:09:11 +0800
Subject: [PATCH 169/204] update doc and activation

---
 det-yolov5-tmi/models/common.py                    |   7 +-
 det-yolov5-tmi/ymir/img-man/training-template.yaml |   2 +-
 det-yolov5-tmi/ymir/mining/ymir_infer.py           |   5 +-
 det-yolov5-tmi/ymir/mining/ymir_mining_cald.py     |   5 +-
 det-yolov5-tmi/ymir/start.py                       |   5 +-
 det-yolov5-tmi/ymir/ymir_yolov5.py                 |   8 +-
 docs/README.MD                                     |   2 +-
 docs/segmentation.md                               | 138 +++++++++++++++++++++
 docs/ymir-dataset-zh-CN.md                         |  16 ++-
 9 files changed, 168 insertions(+), 20 deletions(-)
 create mode 100644 docs/segmentation.md

diff --git a/det-yolov5-tmi/models/common.py b/det-yolov5-tmi/models/common.py
index 488e2ce..289eb78 100644
--- a/det-yolov5-tmi/models/common.py
+++ b/det-yolov5-tmi/models/common.py
@@ -45,10 +45,11 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, k
 
         activation = os.environ.get('ACTIVATION', None)
         if activation is None:
-            self.act = nn.Hardswish() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+            self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
         else:
-            if activation.lower() == 'relu':
-                custom_act = nn.ReLU()
+            act_dict = dict(relu=nn.ReLU, relu6=nn.ReLU6, leakyrelu=nn.LeakyReLU, hardswish=nn.Hardswish, silu=nn.SiLU)
+            if activation.lower() in act_dict:
+                custom_act = act_dict[activation.lower()]()
             else:
                 # view https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity
                 custom_act = getattr(nn, activation)()
diff --git a/det-yolov5-tmi/ymir/img-man/training-template.yaml b/det-yolov5-tmi/ymir/img-man/training-template.yaml
index e855afe..dc7bb02 100644
--- a/det-yolov5-tmi/ymir/img-man/training-template.yaml
+++ b/det-yolov5-tmi/ymir/img-man/training-template.yaml
@@ -19,5 +19,5 @@ args_options: '--exist-ok'
 save_best_only: True # save the best weight file only
 save_period: 10
 sync_bn: False # work for multi-gpu only
-activation: 'Hardswish' # view https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity
+activation: 'SiLU' # view https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity
 ymir_saved_file_patterns: '' # custom saved files, support python regular expression, use , to split multiple pattern
diff --git a/det-yolov5-tmi/ymir/mining/ymir_infer.py b/det-yolov5-tmi/ymir/mining/ymir_infer.py
index 34cd978..a69b063 100644
--- a/det-yolov5-tmi/ymir/mining/ymir_infer.py
+++ b/det-yolov5-tmi/ymir/mining/ymir_infer.py
@@ -13,11 +13,12 @@
 import torch.utils.data as td
 from easydict import EasyDict as edict
 from tqdm import tqdm
+from ymir_exc import result_writer as rw
+from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process
+
 from utils.general import scale_coords
 from ymir.mining.util import YmirDataset, load_image_file
 from ymir.ymir_yolov5 import YmirYolov5
-from ymir_exc import result_writer as rw
-from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process
 
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py
index b2284b8..eb977ba 100644
--- a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py
+++ b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py
@@ -15,12 +15,13 @@
 import torch.utils.data as td
 from easydict import EasyDict as edict
 from tqdm import tqdm
+from ymir_exc import result_writer as rw
+from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process
+
 from utils.general import scale_coords
 from ymir.mining.util import (YmirDataset, collate_fn_with_fake_ann, load_image_file, load_image_file_with_ann,
                               update_consistency)
 from ymir.ymir_yolov5 import YmirYolov5
-from ymir_exc import result_writer as rw
-from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process
 
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
diff --git a/det-yolov5-tmi/ymir/start.py b/det-yolov5-tmi/ymir/start.py
index a1daa2f..b368ac1 100644
--- a/det-yolov5-tmi/ymir/start.py
+++ b/det-yolov5-tmi/ymir/start.py
@@ -4,11 +4,12 @@
 import sys
 
 from easydict import EasyDict as edict
-from models.experimental import attempt_download
-from ymir.ymir_yolov5 import convert_ymir_to_yolov5, get_weight_file
 from ymir_exc import monitor
 from ymir_exc.util import YmirStage, find_free_port, get_bool, get_merged_config, write_ymir_monitor_process
 
+from models.experimental import attempt_download
+from ymir.ymir_yolov5 import convert_ymir_to_yolov5, get_weight_file
+
 
 def start(cfg: edict) -> int:
     logging.info(f'merged config: {cfg}')
diff --git a/det-yolov5-tmi/ymir/ymir_yolov5.py b/det-yolov5-tmi/ymir/ymir_yolov5.py
index 463db89..6b924cf 100644
--- a/det-yolov5-tmi/ymir/ymir_yolov5.py
+++ b/det-yolov5-tmi/ymir/ymir_yolov5.py
@@ -9,14 +9,14 @@
 import torch
 import yaml
 from easydict import EasyDict as edict
-from models.common import DetectMultiBackend
 from nptyping import NDArray, Shape, UInt8
+from ymir_exc import result_writer as rw
+from ymir_exc.util import get_bool, get_weight_files
+
+from models.common import DetectMultiBackend
 from utils.augmentations import letterbox
 from utils.general import check_img_size, non_max_suppression, scale_coords
 from utils.torch_utils import select_device
-from ymir_exc import monitor
-from ymir_exc import result_writer as rw
-from ymir_exc.util import YmirStage, get_bool, get_weight_files, write_ymir_monitor_process
 
 BBOX = NDArray[Shape['*,4'], Any]
 CV_IMAGE = NDArray[Shape['*,*,3'], UInt8]
diff --git a/docs/README.MD b/docs/README.MD
index d498b80..fe4edce 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -18,7 +18,7 @@
 
 ## 基于开源仓库进行定制
 
-- [yolov5示例](https://github.com/yzbx/ymir-yolov5/pull/2/files)
+- [yolov5示例](https://github.com/modelai/ymir-yolov5/pull/2/files)
 
 ## 镜像调试
 
diff --git a/docs/segmentation.md b/docs/segmentation.md
new file mode 100644
index 0000000..c33df9f
--- /dev/null
+++ b/docs/segmentation.md
@@ -0,0 +1,138 @@
+# segmentation
+
+- update date: 2022/11/14
+
+## semantic segmentation: 语义分割
+
+### docker images: docker 镜像
+
+- `youdaoyzbx/ymir-executor:ymir2.0.0-mmseg-cu111-tmi`
+
+### hyper-parameters: 超参数
+
+- training: 训练
+  - `export_format`: `seg-mask:raw`
+
+### convert dataset format: 转换数据集格式
+
+```
+from ymir_exc.dataset_convert import convert_ymir_to_mmseg
+from ymir_exc.util import get_merged_config
+
+ymir_cfg = get_merged_config()
+new_ann_dict = convert_ymir_to_mmseg(ymir_cfg)
+```
+
+### read: 输入格式
+
+```
+in
+├── annotations [19 entries exceeds filelimit, not opening dir]
+├── assets -> /xxx/ymir-workplace/sandbox/0001/asset_cache
+├── config.yaml
+├── env.yaml
+├── idx-assets.tsv
+├── idx-gt.tsv
+├── idx-pred.tsv
+├── models
+├── predictions [18 entries exceeds filelimit, not opening dir]
+├── pred-test-index.tsv
+├── pred-train-index.tsv
+├── pred-val-index.tsv
+├── test-index.tsv
+├── train-index.tsv
+└── val-index.tsv
+```
+
+## in/annotations
+
+```
+ls /in/annotations
+08  15  19  32  35  3b  59  6a  72  77  85  a4  a6  cd  d1  e0  e1  f0  labelmap.txt
+```
+
+## in/annotations/labelmap.txt
+
+- `class_name:R,G,B::`:
+  - class_name=bg, RGB=(0, 0, 0)
+  - class_name=fg, RGB=(1, 1, 1)
+
+- `cat in/annotations/labelmap.txt`
+
+```
+bg:0,0,0::
+fg:1,1,1::
+```
+
+## in/env.yaml
+
+```
+input:
+  annotations_dir: /in/annotations
+  assets_dir: /in/assets
+  candidate_index_file: ''
+  config_file: /in/config.yaml
+  models_dir: /in/models
+  root_dir: /in
+  training_index_file: /in/train-index.tsv
+  val_index_file: /in/val-index.tsv
+output:
+  executor_log_file: /out/ymir-executor-out.log
+  infer_result_file: /out/infer-result.json
+  mining_result_file: /out/result.tsv
+  models_dir: /out/models
+  monitor_file: /out/monitor.txt
+  root_dir: /out
+  tensorboard_dir: /out/tensorboard
+  training_result_file: /out/models/result.yaml
+protocol_version: 1.1.0
+run_infer: false
+run_mining: false
+run_training: true
+task_id: t00000010000059a17ce1668392602
+```
+
+## in/train-index.tsv
+
+```
+/in/assets/32/6371cbb7e0a2c356cb17e17ca467c7f892ccc232.png /in/annotations/32/6371cbb7e0a2c356cb17e17ca467c7f892ccc232.png
+/in/assets/32/562cfd8c96bba98568673d59614d2578258f1e32.png /in/annotations/32/562cfd8c96bba98568673d59614d2578258f1e32.png
+/in/assets/59/f72430463f59d0299c3258e01fc9ad2c5671b359.png /in/annotations/59/f72430463f59d0299c3258e01fc9ad2c5671b359.png
+```
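+
+下面的解析示意(假设性代码,仅按上述文件内容推断格式)演示如何读取 labelmap.txt 与 train-index.tsv:
+
+```python
+def parse_labelmap(path):
+    """parse lines like `bg:0,0,0::` into {class_name: (R, G, B)}"""
+    name_to_rgb = {}
+    with open(path) as f:
+        for line in f:
+            line = line.strip()
+            if not line:
+                continue
+            name, rgb = line.split(':')[:2]
+            name_to_rgb[name] = tuple(int(v) for v in rgb.split(','))
+    return name_to_rgb
+
+def parse_index(path):
+    """each line holds an image path and a mask path"""
+    with open(path) as f:
+        return [line.split() for line in f if line.strip()]
+
+print(parse_labelmap('/in/annotations/labelmap.txt'))  # {'bg': (0, 0, 0), 'fg': (1, 1, 1)}
+print(parse_index('/in/train-index.tsv')[0])
+```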
+
+### write: 输出格式
+
+```
+out
+├── models [17 entries exceeds filelimit, not opening dir]
+├── monitor.txt
+├── tensorboard -> /xxx/ymir-workplace/ymir-tensorboard-logs/0001/t00000010000059a17ce1668392602
+└── ymir-executor-out.log
+```
+
+- `ls /out/models`
+
+```
+20221114_022352.log                      iter_1000.pth  iter_1800.pth  iter_600.pth  ymir-info.yaml
+20221114_022352.log.json                 iter_1200.pth  iter_2000.pth  iter_800.pth
+best_mIoU_iter_1200.pth                  iter_1400.pth  iter_200.pth   latest.pth
+fast_scnn_lr0.12_8x4_160k_cityscapes.py  iter_1600.pth  iter_400.pth   result.yaml
+```
+
+- `cat /out/models/result.yaml`
+
+```
+best_stage_name: best
+map: 0.632
+model_stages:
+  best:
+    files:
+    - fast_scnn_lr0.12_8x4_160k_cityscapes.py
+    - best_mIoU_iter_1200.pth
+    mAP: 0.632
+    stage_name: best
+    timestamp: 1668393850
+  last:
+    files:
+    - fast_scnn_lr0.12_8x4_160k_cityscapes.py
+    - latest.pth
+    mAP: 0.5421
+    stage_name: last
+    timestamp: 1668393874
+```
+
+## instance segmentation: 实例分割
+
+todo: 开发中
diff --git a/docs/ymir-dataset-zh-CN.md b/docs/ymir-dataset-zh-CN.md
index ec9d60d..1a61e88 100644
--- a/docs/ymir-dataset-zh-CN.md
+++ b/docs/ymir-dataset-zh-CN.md
@@ -117,7 +117,7 @@ task_id: t0000001000002ebb7f11653630774  # 任务id
 
 ## ymir输入镜像的标注格式
 
-常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改。
+常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改,其中检测格式 ["det-ark", "det-voc", "det-ls-json"] 也可简写为 ["ark", "voc", "ls-json"], 从而兼容ymir1.1.0。
 
 ```
 image format: ['raw', 'lmdb']
 annotation format: ["none", "det-ark", "det-voc", "det-ls-json", "seg-poly", "seg-mask"]
 ```
 
 ### 默认数据格式
 
-- `export_format=ark:raw`, 标注文件为`xxx.txt`
+- ymir1.1.0 默认数据导出格式 `export_format=ark:raw`, 标注文件为`xxx.txt`
 
-- `export_format=det-ark:raw`, 标注文件为`xxx.txt`
+- ymir2.0.0+ 默认检测数据导出格式 `export_format=det-voc:raw`, 标注文件为`xxx.xml`
+
+- ymir2.0.0+ 默认分割数据导出格式 `export_format=seg-mask:raw`, 标注文件为`xxx.png`
 
 ### voc 数据格式
 
-- `export_format=voc:raw`, 标注文件为`xxx.xml`
+- `export_format=voc:raw` 或 `export_format=det-voc:raw` 标注文件为`xxx.xml`, 可以包含更多自定义的图像信息
+
+- `export_format=ark:raw` 或 `export_format=det-ark:raw`, 标注文件为`xxx.txt`
 
-- `export_format=det-voc:raw`, 标注文件为`xxx.xml`
+- `export_format=seg-mask:raw`, 标注文件为`xxx.png`, 包含`labelmap.txt`, 需要通过`labelmap.txt`将标注图像`xxx.png`从(R, G, B) 映射到`label_id`进行训练。
+
+- `export_format=seg-poly:raw`, 多边形标注文件,待定中

From 3dec7b85ad258aec24fd5e8fd5ea06485cac519b Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Mon, 21 Nov 2022 17:48:33 +0800
Subject: [PATCH 170/204] format and rerank

---
 det-yolov5-tmi/train.py                           | 3 +--
 det-yolov5-tmi/ymir/mining/ymir_infer.py          | 4 ++--
 det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py    | 2 +-
 det-yolov5-tmi/ymir/mining/ymir_mining_cald.py    | 4 ++--
 det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py | 2 +-
 det-yolov5-tmi/ymir/mining/ymir_mining_random.py  | 2 +-
 6 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/det-yolov5-tmi/train.py b/det-yolov5-tmi/train.py
index d1fdd8f..2001443 100644
--- a/det-yolov5-tmi/train.py
+++ b/det-yolov5-tmi/train.py
@@ -32,7 +32,6 @@
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.optim import SGD, Adam, AdamW, lr_scheduler
 from tqdm import tqdm
-from ymir_exc import monitor
 
 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[0]  # YOLOv5 root directory
@@ -317,7 +316,7 @@ def lf(x):
             model.train()
 
         # ymir monitor
-        if epoch % monitor_gap == 0:
+        if epoch % monitor_gap == 0 and RANK in [0, -1]:
             write_ymir_monitor_process(ymir_cfg, task='training', naive_stage_percent=(epoch - start_epoch + 1) / (epochs - start_epoch + 1), stage=YmirStage.TASK)
 
         # Update image weights (optional, single-GPU only)
diff --git a/det-yolov5-tmi/ymir/mining/ymir_infer.py b/det-yolov5-tmi/ymir/mining/ymir_infer.py
index a69b063..5f3d56a 100644
--- a/det-yolov5-tmi/ymir/mining/ymir_infer.py
+++ b/det-yolov5-tmi/ymir/mining/ymir_infer.py
@@ -61,7 +61,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5):
     results = []
     dataset_size = len(images_rank)
     monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu)
-    pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader
+    pbar = tqdm(origin_dataset_loader) if RANK in [0, -1] else origin_dataset_loader
     for idx, batch in enumerate(pbar):
         # batch-level sync, avoid 30min time-out error
         if WORLD_SIZE > 1 and idx < max_barrier_times:
@@ -70,7 +70,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5):
         with torch.no_grad():
             pred = ymir_yolov5.forward(batch['image'].float().to(device), nms=True)
 
-        if idx % monitor_gap == 0:
+        if idx % monitor_gap == 0 and RANK in [0, -1]:
             write_ymir_monitor_process(ymir_cfg,
                                        task='infer',
                                        naive_stage_percent=idx * batch_size_per_gpu / dataset_size,
diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py
b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py index 219790d..fd0087b 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_aldd.py @@ -157,7 +157,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): mining_results = dict() dataset_size = len(images_rank) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + pbar = tqdm(origin_dataset_loader) if RANK in [-1, 0] else origin_dataset_loader miner = ALDD(ymir_cfg) for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py index eb977ba..b36eb1b 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_cald.py @@ -65,7 +65,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): mining_results = dict() beta = 1.3 dataset_size = len(images_rank) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + pbar = tqdm(origin_dataset_loader) if RANK in [-1, 0] else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error if WORLD_SIZE > 1 and idx < max_barrier_times: @@ -108,7 +108,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): dataset_size = len(results) monitor_gap = max(1, dataset_size // 1000 // batch_size_per_gpu) - pbar = tqdm(aug_dataset_loader) if RANK == 0 else aug_dataset_loader + pbar = tqdm(aug_dataset_loader) if RANK in [0, -1] else aug_dataset_loader for idx, batch in enumerate(pbar): if idx % monitor_gap == 0 and RANK in [-1, 0]: write_ymir_monitor_process(ymir_cfg, diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py index 6457c9a..eff29fe 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_entropy.py @@ -60,7 +60,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): mining_results = dict() dataset_size = len(images_rank) - pbar = tqdm(origin_dataset_loader) if RANK == 0 else origin_dataset_loader + pbar = tqdm(origin_dataset_loader) if RANK in [0, -1] else origin_dataset_loader for idx, batch in enumerate(pbar): # batch-level sync, avoid 30min time-out error if WORLD_SIZE > 1 and idx < max_barrier_times: diff --git a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py index 2edc598..815783d 100644 --- a/det-yolov5-tmi/ymir/mining/ymir_mining_random.py +++ b/det-yolov5-tmi/ymir/mining/ymir_mining_random.py @@ -37,7 +37,7 @@ def run(ymir_cfg: edict, ymir_yolov5: YmirYolov5): images_rank = images mining_results = dict() dataset_size = len(images_rank) - pbar = tqdm(images_rank) if RANK == 0 else images_rank + pbar = tqdm(images_rank) if RANK in [-1, 0] else images_rank for idx, image in enumerate(pbar): if RANK in [-1, 0]: write_ymir_monitor_process(ymir_cfg, task='mining', naive_stage_percent=idx / dataset_size, stage=YmirStage.TASK) From 3dec7b85ad258aec24fd5e8fd5ea06485cac519b Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 23 Nov 2022 12:03:37 +0800 Subject: [PATCH 171/204] update yolov5 image doc --- det-yolov5-tmi/ymir/README.md | 78 ++++++++++++++++--- .../ymir/img-man/infer-template.yaml | 1 + 2 files changed, 69 insertions(+), 10 deletions(-) diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md index d576a4c..35998a4 100644 --- a/det-yolov5-tmi/ymir/README.md +++ b/det-yolov5-tmi/ymir/README.md @@ -1,5 +1,5 
@@
 # yolov5-ymir readme
 
-update 2022/11/08
+update 2022/11/23
 
 ## build your ymir-executor
 
@@ -15,13 +15,26 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile .
 
 ### 性能表现
 
+|Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs<br>@640 (B)
+|--- |--- |--- |--- |--- |--- |--- |--- |---
+|[YOLOv5n] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
+|[YOLOv5s] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
+|[YOLOv5m] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
+|[YOLOv5l] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
+|[YOLOv5x] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
+| | | | | | | | |
+|[YOLOv5n6] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
+|[YOLOv5s6] |1280 |44.8 |63.7 |385 |8.2 |3.6 |16.8 |12.6
+|[YOLOv5m6] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
+|[YOLOv5l6] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
+
 ### 训练参数说明
 
 - 一些参数由ymir后台生成,如 `gpu_id`, `class_names` 等参数
 
-  - `gpu_id`:
-  - `task_id`:
-  - `model_params_path`:
-  - `class_names`:
+  - `gpu_id`: 使用的GPU硬件编号,如`0,1,2`,类型为 `str`。实际上对应的主机GPU随机,可能为`3,1,7`,镜像中只能感知并使用`0,1,2`作为设备ID。
+  - `task_id`: ymir任务id, 类型为 `str`
+  - `pretrained_model_params`: 预训练模型文件的路径,类型为 `List[str]`
+  - `class_names`: 类别名,类型为 `List[str]`
 
 - 一些参数由ymir后台进行处理,如 `shm_size`, `export_format`, 其中 `shm_size` 影响到docker镜像所能使用的共享内存,若过小会导致 `out of memory` 等错误。 `export_format` 会决定docker镜像中所看到数据的格式
 
@@ -47,21 +60,66 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile .
 
 ### 训练结果文件示例
 
 ```
-
+.
+├── data.yaml # ymir数据集转换后生成的data.yaml
+├── models # 模型保存目录
+├── monitor.txt # ymir进度接口文件
+├── tensorboard # tensorboard日志文件
+│   ├── events.out.tfevents.1669112949.2cf0844ff367.337.0
+│   ├── results.csv
+│   ├── results.png
+│   ├── train_batch0.jpg
+│   ├── train_batch1.jpg
+│   └── train_batch2.jpg
+├── test.tsv # ymir数据集转换后生成的测试索引文件,为空
+├── train.cache # 训练集缓存文件
+├── train.tsv # ymir数据集转换后生成的训练集索引文件
+├── val.cache # 验证集缓存文件
+└── val.tsv # ymir数据集转换后生成的验证集索引文件
 ```
+
+---
+
 ## 推理: infer
 
+推理任务中,ymir后台会生成参数 `gpu_id`, `class_names`, `task_id` 与 `model_param_path`, 其中`model_param_path`与训练任务中的`pretrained_model_params`类似。
+
 ### 推理参数说明
 
 | 超参数 | 默认值 | 类型 | 说明 | 建议 |
 | - | - | - | - | - |
 | hyper-parameter | default value | type | note | advice |
+| img_size | 640 | int | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 |
+| conf_thres | 0.25 | float | 置信度阈值 | 采用默认值 |
+| iou_thres | 0.45 | float | nms时的iou阈值 | 采用默认值 |
+| batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 |
+| num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - |
+| shm_size | 128G | 字符串:str | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
+| pin_memory | False | bool | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 |
+
+---
+
+## 挖掘: mining
+
+挖掘任务中,ymir后台会生成参数 `gpu_id`, `class_names`, `task_id` 与 `model_param_path`, 其中`model_param_path`与训练任务中的`pretrained_model_params`类似。推理与挖掘任务ymir后台生成的参数一样。
+
+### 挖掘参数说明
+
+| 超参数 | 默认值 | 类型 | 说明 | 建议 |
+| - | - | - | - | - |
+| hyper-parameter | default value | type | note | advice |
+| img_size | 640 | int | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 |
+| mining_algorithm | aldd | str | 挖掘算法名称,可选 random, aldd, cald, entropy | 建议单类检测采用aldd,多类检测采用entropy |
+| class_distribution_scores | '' | List[float]的字符表示 | aldd算法的类别平衡参数 | 不用更改, 专业用户可根据各类比较调整,如对于4类检测,用 `1.0,1.0,0.1,0.2` 降低后两类的挖掘比重 |
+| conf_thres | 0.25 | float | 置信度阈值 | 采用默认值 |
+| iou_thres | 0.45 | float | nms时的iou阈值 | 采用默认值 |
+| batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 |
+| num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - |
+| shm_size | 128G | 字符串:str | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
+| pin_memory | False | bool | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 |
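+
+其中 `class_distribution_scores` 以逗号分隔的字符串形式传入,下面是一个解析示意(假设性代码,并非镜像内的确切实现):
+
+```python
+def parse_class_distribution_scores(value: str, num_classes: int):
+    """'1.0,1.0,0.1,0.2' -> [1.0, 1.0, 0.1, 0.2]; empty string -> all ones"""
+    if not value:
+        return [1.0] * num_classes
+    scores = [float(v) for v in value.split(',')]
+    assert len(scores) == num_classes, 'need one score per detection class'
+    return scores
+
+print(parse_class_distribution_scores('1.0,1.0,0.1,0.2', 4))
+```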
 
-## main change log
+## 主要改动:main change log
 
 - add `start.py` and `ymir/ymir_yolov5.py` for train/infer/mining
 
@@ -89,7 +147,7 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile .
 
 - other modifications support onnx export, not important.
 
-## new features
+## 更新功能:new features
 
 - 2022/09/08: add aldd active learning algorithm for mining task. [Active Learning for Deep Detection Neural Networks (ICCV 2019)](https://gitlab.com/haghdam/deep_active_learning)
 - 2022/09/14: support changing hyper-parameter `num_workers_per_gpu`
diff --git a/det-yolov5-tmi/ymir/img-man/infer-template.yaml b/det-yolov5-tmi/ymir/img-man/infer-template.yaml
index 329887a..c19dc74 100644
--- a/det-yolov5-tmi/ymir/img-man/infer-template.yaml
+++ b/det-yolov5-tmi/ymir/img-man/infer-template.yaml
@@ -13,3 +13,4 @@ iou_thres: 0.45
 batch_size_per_gpu: 16
 num_workers_per_gpu: 4
 pin_memory: False
+shm_size: 128G

From ba9f08302c7958258be1cb5a3c4ab6588cfa9d2699 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 24 Nov 2022 18:49:00 +0800
Subject: [PATCH 172/204] speed up

---
 docs/FAQ.md                    |  4 ++--
 docs/README.MD                 |  2 ++
 docs/speedup_apt_pip_docker.md | 36 ++++++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 2 deletions(-)
 create mode 100644 docs/speedup_apt_pip_docker.md

diff --git a/docs/FAQ.md b/docs/FAQ.md
index 85e96d7..7267a48 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -21,9 +21,9 @@
 - 回到项目根目录或docker file对应根目录,确保docker file 中`COPY/ADD`的文件与文件夹能够访问,以yolov5为例.
 
   ```
-  cd ymir-executor/det-yolov5-tmi
+  cd ymir-executor-fork/det-yolov5-tmi
 
-  docker build -t ymir-executor/yolov5:cuda111 . -f cuda111.dockerfile
+  docker build -t ymir-executor/yolov5:cuda111 . -f ymir/docker/cuda111.dockerfile
   ```
 
 ## 模型精度/速度如何权衡与提升
diff --git a/docs/README.MD b/docs/README.MD
index fe4edce..cedb089 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -47,3 +47,5 @@
 - [FAQ](./FAQ.md)
 
 - [ymir版本与接口兼容](./ymir-executor-version.md)
+
+- [加速apt/pip/docker](./speedup_apt_pip_docker.md)
diff --git a/docs/speedup_apt_pip_docker.md b/docs/speedup_apt_pip_docker.md
new file mode 100644
index 0000000..4eac2ae
--- /dev/null
+++ b/docs/speedup_apt_pip_docker.md
@@ -0,0 +1,36 @@
+# docker 加速 apt
+
+在 `dockerfile` 中添加如下命令再进行 `apt` 安装
+
+```
+# Install linux package
+RUN sed -i 's#http://archive.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list \
+    && sed -i 's#http://security.ubuntu.com#https://mirrors.ustc.edu.cn#g' /etc/apt/sources.list \
+    && apt-get update
+```
+
+- [ubuntu/debian 加速apt](https://mirrors.tuna.tsinghua.edu.cn/help/ubuntu/)
+- [centos 加速yum](https://mirrors.tuna.tsinghua.edu.cn/help/centos/)
+
+# docker 加速 pip
+
+在 `dockerfile` 中添加如下命令再进行 `pip` 安装
+
+```
+# install ymir-exc sdk
+RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
+```
+
+- [pip 加速](https://mirrors.tuna.tsinghua.edu.cn/help/pypi/)
+- [conda/anaconda 加速](https://mirrors.tuna.tsinghua.edu.cn/help/anaconda/)
+
+# docker pull/push 加速
+
+以下链接均没测试,欢迎反馈
+
+- [南京大学 mirror](https://nju-mirror-help.njuer.org/dockerhub.html)
+
+- [百度网易阿里 mirror](https://yeasy.gitbook.io/docker_practice/install/mirror)
+
+- [华为 mirror](https://bbs.huaweicloud.com/blogs/381362)

From 33191995a8ccea746645be5d6dbf7fed4569e926 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 30 Nov 2022 18:18:14 +0800
Subject: [PATCH 173/204] update docker ignore for model-store

---
 det-yolov5-tmi/.dockerignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/det-yolov5-tmi/.dockerignore b/det-yolov5-tmi/.dockerignore
index 
9f34de6..39f415a 100644 --- a/det-yolov5-tmi/.dockerignore +++ b/det-yolov5-tmi/.dockerignore @@ -11,6 +11,9 @@ data/samples/* **/results*.csv *.jpg +ymir/tensorrt/build +ymir/tensorrt/pt_result +ymir/tensorrt/trt_result # Neural Network weights ----------------------------------------------------------------------------------------------- #**/*.pt **/*.pth From 3cf793694118ef57606a5d01de35cf6707e7650c Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 30 Nov 2022 19:02:49 +0800 Subject: [PATCH 174/204] update yolov5 version --- det-yolov5-tmi/utils/downloads.py | 16 +++++----------- docs/README.MD | 2 ++ docs/import_outer_weight.md | 30 ++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 11 deletions(-) create mode 100644 docs/import_outer_weight.md diff --git a/det-yolov5-tmi/utils/downloads.py b/det-yolov5-tmi/utils/downloads.py index d7b87cb..c71fad2 100644 --- a/det-yolov5-tmi/utils/downloads.py +++ b/det-yolov5-tmi/utils/downloads.py @@ -58,17 +58,11 @@ def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads i # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) - try: - response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api - assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] - tag = response['tag_name'] # i.e. 'v1.0' - except Exception: # fallback plan - assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', - 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] - try: - tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] - except Exception: - tag = 'v6.0' # current release + assets = [ + 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', + 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt' + ] + tag = 'v6.1' if name in assets: safe_download(file, diff --git a/docs/README.MD b/docs/README.MD index cedb089..0d71500 100644 --- a/docs/README.MD +++ b/docs/README.MD @@ -49,3 +49,5 @@ - [ymir版本与接口兼容](./ymir-executor-version.md) - [加速apt/pip/docker](./speedup_apt_pip_docker.md) + +- [导入外部模型权值](./import_outer_weight.md) diff --git a/docs/import_outer_weight.md b/docs/import_outer_weight.md new file mode 100644 index 0000000..6dfa16e --- /dev/null +++ b/docs/import_outer_weight.md @@ -0,0 +1,30 @@ +# 导入外部模型权值 + +## import extra model for yolov5 (ymir2.0.0) + +- create a tar file with weight file `best.pt` and config file `ymir-info.yaml` + +``` +$ tar -cf yolov5_best.tar best.pt ymir-info.yaml +$ cat ymir-info.yaml +best_stage_name: best +executor_config: + class_names: + - dog +package_version: 2.0.0 +stages: + best: + files: + - best.pt + mAP: 0.8349897782446034 + stage_name: best + timestamp: 1669186346 +task_context: + executor: youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi + mAP: 0.8349897782446034 + producer: ymir + task_parameters: '{"keywords": ["dog"]}' + type: 1 +``` + +![图片](https://user-images.githubusercontent.com/5005182/184783723-1ce48603-1254-4ed9-90ba-c1dd8510dc79.png) From f0746809f5db4db84193e9444360c9e28d85c861 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 3 Jan 2023 09:51:46 +0800 Subject: [PATCH 175/204] update readme for yolov5 --- det-yolov5-tmi/ymir/README.md | 58 +++++++++++++++++------------------ 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md index 
35998a4..6343675 100644
--- a/det-yolov5-tmi/ymir/README.md
+++ b/det-yolov5-tmi/ymir/README.md
@@ -43,20 +43,20 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile .
 | 超参数 | 默认值 | 类型 | 说明 | 建议 |
 | - | - | - | - | - |
 | hyper-parameter | default value | type | note | advice |
-| shm_size | 128G | 字符串:str | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
-| export_format | ark:raw | 字符串:str | 受ymir后台处理,ymir数据集导出格式 | - |
-| model | yolov5s | 字符串:str | yolov5模型,可选yolov5n, yolov5s, yolov5m, yolov5l等 | 建议:速度快选yolov5n, 精度高选yolov5l, yolov5x, 平衡选yolov5s或yolov5m |
-| batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 |
-| num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - |
-| epochs | 100 | 整数:int | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 |
-| img_size | 640 | 整数: int | 输入模型的图像分辨率 | - |
-| opset | 11 | 整数: int | onnx 导出参数 opset | 建议:一般不需要用到onnx,不必改 |
-| args_options | '--exist-ok' | 字符串:str | yolov5命令行参数 | 建议:专业用户可用yolov5所有命令行参数 |
-| save_best_only | True | 布尔: bool | 是否只保存最优模型 | 建议:为节省空间设为True即可 |
-| save_period | 10 | 整数: int | 保存模型的间隔 | 建议:当save_best_only为False时,可保存 `epoch/save_period` 个中间结果
-| sync_bn | False | 布尔: bool | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 |
-| activation | '' | 字符串:str | 激活函数,默认为nn.Hardswish(), 参考 [pytorch激活函数](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) | 可选值: ELU, Hardswish, LeakyReLU, PReLU, ReLU, ReLU6, SiLU, ... |
-| ymir_saved_file_patterns | '' | 字符串: str | 用 `,` 分隔的保存文件模式 | 建议:专业用户当希望过滤保存的文件以节省空间时,可设置配置的正则表达式 |
+| shm_size | 128G | 字符串 | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
+| export_format | ark:raw | 字符串 | 受ymir后台处理,ymir数据集导出格式 | - |
+| model | yolov5s | 字符串 | yolov5模型,可选yolov5n, yolov5s, yolov5m, yolov5l等 | 建议:速度快选yolov5n, 精度高选yolov5l, yolov5x, 平衡选yolov5s或yolov5m |
+| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 |
+| num_workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - |
+| epochs | 100 | 整数 | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 |
+| img_size | 640 | 整数 | 输入模型的图像分辨率 | - |
+| opset | 11 | 整数 | onnx 导出参数 opset | 建议:一般不需要用到onnx,不必改 |
+| args_options | '--exist-ok' | 字符串 | yolov5命令行参数 | 建议:专业用户可用yolov5所有命令行参数 |
+| save_best_only | True | 布尔型 | 是否只保存最优模型 | 建议:为节省空间设为True即可 |
+| save_period | 10 | 整数 | 保存模型的间隔 | 建议:当save_best_only为False时,可保存 `epoch/save_period` 个中间结果
+| sync_bn | False | 布尔型 | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 |
+| activation | '' | 字符串 | 激活函数,默认为nn.Hardswish(), 参考 [pytorch激活函数](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) | 可选值: ELU, Hardswish, LeakyReLU, PReLU, ReLU, ReLU6, SiLU, ... |
+| ymir_saved_file_patterns | '' | 字符串 | 用 `,` 分隔的保存文件模式 | 建议:专业用户当希望过滤保存的文件以节省空间时,可设置配置的正则表达式 |
 
 ### 训练结果文件示例
 ```
@@ -89,13 +89,13 @@
 | 超参数 | 默认值 | 类型 | 说明 | 建议 |
 | - | - | - | - | - |
 | hyper-parameter | default value | type | note | advice |
-| img_size | 640 | int | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 |
-| conf_thres | 0.25 | float | 置信度阈值 | 采用默认值 |
-| iou_thres | 0.45 | float | nms时的iou阈值 | 采用默认值 |
-| batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 |
-| num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - |
-| shm_size | 128G | 字符串:str | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
-| pin_memory | False | bool | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 |
+| img_size | 640 | 整数 | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 |
+| conf_thres | 0.25 | 浮点数 | 置信度阈值 | 采用默认值 |
+| iou_thres | 0.45 | 浮点数 | nms时的iou阈值 | 采用默认值 |
+| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 |
+| num_workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - |
+| shm_size | 128G | 字符串 | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
+| pin_memory | False | 布尔型 | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 |
 
 ---
 
@@ -109,15 +109,15 @@
 | 超参数 | 默认值 | 类型 | 说明 | 建议 |
 | - | - | - | - | - |
 | hyper-parameter | default value | type | note | advice |
-| img_size | 640 | int | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 |
-| mining_algorithm | aldd | str | 挖掘算法名称,可选 random, aldd, cald, entropy | 建议单类检测采用aldd,多类检测采用entropy |
-| class_distribution_scores | '' | List[float]的字符表示 | aldd算法的类别平衡参数 | 不用更改, 专业用户可根据各类比较调整,如对于4类检测,用 `1.0,1.0,0.1,0.2` 降低后两类的挖掘比重 |
-| conf_thres | 0.25 | float | 置信度阈值 | 采用默认值 |
-| iou_thres | 0.45 | float | nms时的iou阈值 | 采用默认值 |
-| batch_size_per_gpu | 16 | 整数:int | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 |
-| num_workers_per_gpu | 4 | 整数:int | 每张GPU对应的数据读取进程数 | - |
-| shm_size | 128G | 字符串:str | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
-| pin_memory | False | bool | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 |
+| img_size | 640 | 整数 | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 |
+| mining_algorithm | aldd | 字符串 | 挖掘算法名称,可选 random, aldd, cald, entropy | 建议单类检测采用aldd,多类检测采用entropy |
+| class_distribution_scores | '' | List[float]的字符表示 | aldd算法的类别平衡参数 | 不用更改, 专业用户可根据各类比较调整,如对于4类检测,用 `1.0,1.0,0.1,0.2` 降低后两类的挖掘比重 |
+| conf_thres | 0.25 | 浮点数 | 置信度阈值 | 采用默认值 |
+| iou_thres | 0.45 | 浮点数 | nms时的iou阈值 | 采用默认值 |
+| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 |
+| num_workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - |
+| shm_size | 128G | 字符串 | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G |
+| pin_memory | False | 布尔型 | 是否为数据集单独固定内存? 
| 内存充足时改为True可加快数据集加载 | ## 主要改动:main change log From 645fe9bb2307f96722b5c9f96d8be9ab00165eb6 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 3 Jan 2023 09:56:21 +0800 Subject: [PATCH 176/204] add empty doc --- det-demo-tmi/app/start.py | 44 ++-- det-demo-tmi/requirements.txt | 2 +- docs/cn/README.MD | 3 + docs/cn/docker_images/det-detectron2-tmi.md | 0 docs/cn/docker_images/det-mmdet-tmi.md | 0 docs/cn/docker_images/det-nanodet-tmi.md | 0 docs/cn/docker_images/det-yolov4-tmi.md | 0 docs/cn/docker_images/det-yolov5-tmi.md | 0 docs/cn/docker_images/det-yolov7-tmi.md | 0 docs/ymir-dataset-format.md | 128 ++++++++++ seg-semantic-demo-tmi/Dockerfile | 0 seg-semantic-demo-tmi/README.MD | 3 + seg-semantic-demo-tmi/app/start.py | 227 ++++++++++++++++++ .../img-man/infer-template.yaml | 12 + seg-semantic-demo-tmi/img-man/manifest.yaml | 2 + .../img-man/mining-template.yaml | 12 + .../img-man/training-template.yaml | 18 ++ seg-semantic-demo-tmi/requirements.txt | 4 + 18 files changed, 432 insertions(+), 23 deletions(-) create mode 100644 docs/cn/README.MD create mode 100644 docs/cn/docker_images/det-detectron2-tmi.md create mode 100644 docs/cn/docker_images/det-mmdet-tmi.md create mode 100644 docs/cn/docker_images/det-nanodet-tmi.md create mode 100644 docs/cn/docker_images/det-yolov4-tmi.md create mode 100644 docs/cn/docker_images/det-yolov5-tmi.md create mode 100644 docs/cn/docker_images/det-yolov7-tmi.md create mode 100644 docs/ymir-dataset-format.md create mode 100644 seg-semantic-demo-tmi/Dockerfile create mode 100644 seg-semantic-demo-tmi/README.MD create mode 100644 seg-semantic-demo-tmi/app/start.py create mode 100644 seg-semantic-demo-tmi/img-man/infer-template.yaml create mode 100644 seg-semantic-demo-tmi/img-man/manifest.yaml create mode 100644 seg-semantic-demo-tmi/img-man/mining-template.yaml create mode 100644 seg-semantic-demo-tmi/img-man/training-template.yaml create mode 100644 seg-semantic-demo-tmi/requirements.txt diff --git a/det-demo-tmi/app/start.py b/det-demo-tmi/app/start.py index 2b8e877..81eebf0 100644 --- a/det-demo-tmi/app/start.py +++ b/det-demo-tmi/app/start.py @@ -7,8 +7,8 @@ # view https://github.com/protocolbuffers/protobuf/issues/10051 for detail os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') -from tensorboardX import SummaryWriter from easydict import EasyDict as edict +from tensorboardX import SummaryWriter from ymir_exc import monitor from ymir_exc import result_writer as rw from ymir_exc.util import get_merged_config @@ -35,19 +35,19 @@ def _run_training(cfg: edict) -> None: 3. how to write logs 4. how to write training result """ - #! use `env.get_executor_config` to get config file for training + # use `env.get_executor_config` to get config file for training gpu_id: str = cfg.param.get(key='gpu_id') class_names: List[str] = cfg.param.get(key='class_names') expected_mAP: float = cfg.param.get(key='expected_map', default=0.6) idle_seconds: float = cfg.param.get(key='idle_seconds', default=60) trigger_crash: bool = cfg.param.get(key='trigger_crash', default=False) - #! use `logging` or `print` to write log to console + # use `logging` or `print` to write log to console # notice that logging.basicConfig is invoked at executor.env logging.info(f'gpu device: {gpu_id}') logging.info(f'dataset class names: {class_names}') logging.info(f"training config: {cfg.param}") - #! 
count for image and annotation file + # count for image and annotation file with open(cfg.ymir.input.training_index_file, 'r') as fp: lines = fp.readlines() @@ -64,7 +64,7 @@ def _run_training(cfg: edict) -> None: if os.path.isfile(annotation_path): valid_ann_count += 1 - #! use `monitor.write_monitor_logger` to write write task process percent to monitor.txt + # use `monitor.write_monitor_logger` to write write task process percent to monitor.txt if idx % monitor_gap == 0: monitor.write_monitor_logger(percent=0.2 * idx / N) @@ -72,18 +72,18 @@ def _run_training(cfg: edict) -> None: logging.info(f'valid images: {valid_image_count}') logging.info(f'valid annotations: {valid_ann_count}') - #! use `monitor.write_monitor_logger` to write write task process percent to monitor.txt + # use `monitor.write_monitor_logger` to write write task process percent to monitor.txt monitor.write_monitor_logger(percent=0.2) # suppose we have a long time training, and have saved the final model - #! model output dir: os.path.join(cfg.ymir.output.models_dir, your_stage_name) + # model output dir: os.path.join(cfg.ymir.output.models_dir, your_stage_name) stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch10') os.makedirs(stage_dir, exist_ok=True) with open(os.path.join(stage_dir, 'epoch10.pt'), 'w') as f: f.write('fake model weight') with open(os.path.join(stage_dir, 'config.py'), 'w') as f: f.write('fake model config file') - #! use `rw.write_model_stage` to save training result + # use `rw.write_model_stage` to save training result rw.write_model_stage(stage_name='epoch10', files=['epoch10.pt', 'config.py'], mAP=random.random() / 2) _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) @@ -98,26 +98,26 @@ def _run_training(cfg: edict) -> None: f.write('fake model config file') rw.write_model_stage(stage_name='epoch20', files=['epoch20.pt', 'config.py'], mAP=expected_mAP) - #! if task done, write 100% percent log + # if task done, write 100% percent log logging.info('training done') monitor.write_monitor_logger(percent=1.0) def _run_mining(cfg: edict) -> None: - #! use `cfg.param` to get config file for training + # use `cfg.param` to get config file for training # pretrained models in `cfg.ymir.input.models_dir` gpu_id: str = cfg.param.get(key='gpu_id') class_names: List[str] = cfg.param.get(key='class_names') idle_seconds: float = cfg.param.get('idle_seconds', 60) trigger_crash: bool = cfg.param.get('trigger_crash', False) - #! use `logging` or `print` to write log to console + # use `logging` or `print` to write log to console logging.info(f"mining config: {cfg.param}") logging.info(f'gpu device: {gpu_id}') logging.info(f'dataset class names: {class_names}') - #! use `cfg.input.candidate_index_file` to read candidate dataset items + # use `cfg.input.candidate_index_file` to read candidate dataset items # note that annotations path will be empty str if there's no annotations in that dataset - #! count for image files + # count for image files with open(cfg.ymir.input.candidate_index_file, 'r') as fp: lines = fp.readlines() @@ -128,34 +128,34 @@ def _run_mining(cfg: edict) -> None: valid_image_count += 1 valid_images.append(line.strip()) - #! use `monitor.write_monitor_logger` to write task process to monitor.txt + # use `monitor.write_monitor_logger` to write task process to monitor.txt logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") monitor.write_monitor_logger(percent=0.2) _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) - #! 
write mining result + # write mining result # here we give a fake score to each assets total_length = len(valid_images) mining_result = [(asset_path, index / total_length) for index, asset_path in enumerate(valid_images)] rw.write_mining_result(mining_result=mining_result) - #! if task done, write 100% percent log + # if task done, write 100% percent log logging.info('mining done') monitor.write_monitor_logger(percent=1.0) def _run_infer(cfg: edict) -> None: - #! use `cfg.param` to get config file for training + # use `cfg.param` to get config file for training # models are transfered in `cfg.ymir.input.models_dir` model_params_path class_names = cfg.param.get('class_names') idle_seconds: float = cfg.param.get('idle_seconds', 60) trigger_crash: bool = cfg.param.get('trigger_crash', False) seed: int = cfg.param.get('seed', 15) - #! use `logging` or `print` to write log to console + # use `logging` or `print` to write log to console logging.info(f"infer config: {cfg.param}") - #! use `cfg.ymir.input.candidate_index_file` to read candidate dataset items + # use `cfg.ymir.input.candidate_index_file` to read candidate dataset items # note that annotations path will be empty str if there's no annotations in that dataset with open(cfg.ymir.input.candidate_index_file, 'r') as fp: lines = fp.readlines() @@ -170,13 +170,13 @@ def _run_infer(cfg: edict) -> None: else: invalid_images.append(line.strip()) - #! use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt + # use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") monitor.write_monitor_logger(percent=0.2) _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) - #! write infer result + # write infer result fake_anns = [] random.seed(seed) for class_name in class_names: @@ -193,7 +193,7 @@ def _run_infer(cfg: edict) -> None: infer_result[asset_path] = [] rw.write_infer_result(infer_result=infer_result) - #! 
if task done, write 100% percent log
+    # if task done, write 100% percent log
     logging.info('infer done')
     monitor.write_monitor_logger(percent=1.0)
 
diff --git a/det-demo-tmi/requirements.txt b/det-demo-tmi/requirements.txt
index 0517cf4..20103d3 100644
--- a/det-demo-tmi/requirements.txt
+++ b/det-demo-tmi/requirements.txt
@@ -1,4 +1,4 @@
 pydantic>=1.8.2
 pyyaml>=5.4.1
 tensorboardX>=2.4
--e "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0"
+#ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0
diff --git a/docs/cn/README.MD b/docs/cn/README.MD
new file mode 100644
index 0000000..7784a09
--- /dev/null
+++ b/docs/cn/README.MD
@@ -0,0 +1,3 @@
+# 中文说明文档
+
+
diff --git a/docs/cn/docker_images/det-detectron2-tmi.md b/docs/cn/docker_images/det-detectron2-tmi.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/cn/docker_images/det-mmdet-tmi.md b/docs/cn/docker_images/det-mmdet-tmi.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/cn/docker_images/det-nanodet-tmi.md b/docs/cn/docker_images/det-nanodet-tmi.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/cn/docker_images/det-yolov4-tmi.md b/docs/cn/docker_images/det-yolov4-tmi.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/cn/docker_images/det-yolov5-tmi.md b/docs/cn/docker_images/det-yolov5-tmi.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/cn/docker_images/det-yolov7-tmi.md b/docs/cn/docker_images/det-yolov7-tmi.md
new file mode 100644
index 0000000..e69de29
diff --git a/docs/ymir-dataset-format.md b/docs/ymir-dataset-format.md
new file mode 100644
index 0000000..b7da634
--- /dev/null
+++ b/docs/ymir-dataset-format.md
@@ -0,0 +1,128 @@
+# ymir 镜像数据标注格式
+
+本文介绍在算法镜像中,ymir的数据标注格式。
+
+| export_format | 算法类型 | 格式说明 |
+| - | - | - |
+| ark:raw 或 det-ark:raw | 目标检测 | 标注文件为txt |
+| voc:raw 或 det-voc:raw | 目标检测 | 标注文件为xml,目标检测默认格式 |
+| seg-coco:raw | 图像分割 | 标注文件为json,图像分割默认格式 |
+
+## 设置修改
+
+- 对于训练镜像,用户可以通过设置 `/img-man/training-template.yaml` 中的 `export_format` 字段来控制镜像需要使用的数据格式。
+
+- 对于推理或挖掘镜像,由于不需要用到标注文件,因此不需要设置数据标注格式。
+
+- 目录结构
+
+```
+/in
+├── annotations  # 标注文件所在目录
+├── assets  # 图像所在目录
+├── config.yaml  # 超参数配置文件
+├── env.yaml  # ymir环境配置文件
+├── models  # 预训练模型权重文件所在目录
+├── train-index.tsv  # 训练集索引文件
+└── val-index.tsv  # 验证集索引文件
+```
+
+- 索引文件格式
+
+每行为 `图像绝对路径` + `\t` + `标注文件绝对路径`,用户按行解析索引文件,即可获得所有的标注图像与标注文件。
+
+## det-ark:raw
+
+- 索引文件示例
+```
+/in/assets/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.jpeg /in/annotations/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.txt
+/in/assets/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.jpeg /in/annotations/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.txt
+/in/assets/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.jpeg /in/annotations/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.txt
+```
+
+- 标注文件示例
+
+每行为 `class_id` + `xmin` + `ymin` + `xmax` + `ymax`,通过 `,` 进行分隔。
+
+```
+0, 122, 7, 372, 375
+1, 211, 147, 325, 255
+```
+
+## det-voc:raw
+
+- 索引文件示例
+```
+/in/assets/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.jpeg /in/annotations/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.xml
+/in/assets/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.jpeg /in/annotations/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.xml
+/in/assets/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.jpeg /in/annotations/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.xml
+```
+
+- 标注文件示例
+
+参考voc xml 格式
+
+```
+<annotation>
+    <folder>VOC2012</folder>
+    <filename>2008_000026.jpg</filename>
+    <source>
+        <database>The VOC2008 Database</database>
+        <annotation>PASCAL VOC2008</annotation>
+        <image>flickr</image>
+    </source>
+    <size>
+        <width>500</width>
+        <height>375</height>
+        <depth>3</depth>
+    </size>
+    <segmented>0</segmented>
+    <object>
+        <name>person</name>
+        <pose>Frontal</pose>
+        <truncated>1</truncated>
+        <occluded>1</occluded>
+        <bndbox>
+            <xmin>122</xmin>
+            <ymin>7</ymin>
+            <xmax>372</xmax>
+            <ymax>375</ymax>
+        </bndbox>
+        <difficult>0</difficult>
+    </object>
+    <object>
+        <name>dog</name>
+        <pose>Unspecified</pose>
+        <truncated>0</truncated>
+        <occluded>1</occluded>
+        <bndbox>
+            <xmin>211</xmin>
+            <ymin>147</ymin>
+            <xmax>325</xmax>
+            <ymax>255</ymax>
+        </bndbox>
+        <difficult>0</difficult>
+    </object>
+</annotation>
+```
+
+## seg-coco:raw
+
+- 索引文件示例
+
+其中所有图像文件都对应同一个标注文件
+
+```
+/in/assets/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.jpeg /in/annotations/coco-annotations.json
+/in/assets/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.jpeg /in/annotations/coco-annotations.json
+/in/assets/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.jpeg /in/annotations/coco-annotations.json
+```
+
+- 标注文件示例
+
+参考coco格式
+
+```
+
+```
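编辑者补充:下面的示意代码(假设性示例,`load_index` 与 `load_ark_annotation` 为示例命名,并非 ymir-exc SDK 提供的接口)演示如何按上述约定逐行解析索引文件与 det-ark:raw 格式的 txt 标注:

```python
# 示意:解析 ymir 索引文件与 det-ark:raw 标注(编辑者假设的示例实现)
from typing import List, Tuple


def load_index(index_file: str) -> List[Tuple[str, str]]:
    """每行: 图像绝对路径 + '\t' + 标注文件绝对路径;推理/挖掘数据集的标注路径可能为空"""
    pairs = []
    with open(index_file, 'r') as fp:
        for line in fp:
            parts = line.strip().split()
            if not parts:
                continue
            ann_path = parts[1] if len(parts) > 1 else ''
            pairs.append((parts[0], ann_path))
    return pairs


def load_ark_annotation(ann_path: str) -> List[Tuple[int, int, int, int, int]]:
    """每行: class_id, xmin, ymin, xmax, ymax,以 ',' 分隔"""
    boxes = []
    with open(ann_path, 'r') as fp:
        for line in fp:
            if line.strip():
                class_id, xmin, ymin, xmax, ymax = [int(v) for v in line.split(',')]
                boxes.append((class_id, xmin, ymin, xmax, ymax))
    return boxes
```

det-voc:raw 的 xml 标注可用标准库 `xml.etree.ElementTree` 以同样方式逐文件解析。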
diff --git a/seg-semantic-demo-tmi/Dockerfile b/seg-semantic-demo-tmi/Dockerfile
new file mode 100644
index 0000000..e69de29
diff --git a/seg-semantic-demo-tmi/README.MD b/seg-semantic-demo-tmi/README.MD
new file mode 100644
index 0000000..cf47032
--- /dev/null
+++ b/seg-semantic-demo-tmi/README.MD
@@ -0,0 +1,3 @@
+# ymir 自定义语义分割镜像
+
+
diff --git a/seg-semantic-demo-tmi/app/start.py b/seg-semantic-demo-tmi/app/start.py
new file mode 100644
index 0000000..dd51caa
--- /dev/null
+++ b/seg-semantic-demo-tmi/app/start.py
@@ -0,0 +1,227 @@
+import logging
+import os
+import random
+import sys
+import time
+from typing import List
+
+# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail
+os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python')
+from easydict import EasyDict as edict
+from tensorboardX import SummaryWriter
+from ymir_exc import monitor
+from ymir_exc import result_writer as rw
+from ymir_exc.util import get_merged_config
+
+
+def start() -> int:
+    cfg = get_merged_config()
+
+    if cfg.ymir.run_training:
+        _run_training(cfg)
+    if cfg.ymir.run_mining:
+        _run_mining(cfg)
+    if cfg.ymir.run_infer:
+        _run_infer(cfg)
+
+    return 0
+
+
+def _run_training(cfg: edict) -> None:
+    """
+    sample function of training, which shows:
+    1. how to get config file
+    2. how to read training and validation datasets
+    3. how to write logs
+    4.
how to write training result + """ + # use `env.get_executor_config` to get config file for training + gpu_id: str = cfg.param.get(key='gpu_id') + class_names: List[str] = cfg.param.get(key='class_names') + expected_miou: float = cfg.param.get(key='expected_miou', default=0.6) + idle_seconds: float = cfg.param.get(key='idle_seconds', default=60) + trigger_crash: bool = cfg.param.get(key='trigger_crash', default=False) + # use `logging` or `print` to write log to console + # notice that logging.basicConfig is invoked at executor.env + logging.info(f'gpu device: {gpu_id}') + logging.info(f'dataset class names: {class_names}') + logging.info(f"training config: {cfg.param}") + + # count for image and annotation file + with open(cfg.ymir.input.training_index_file, 'r') as fp: + lines = fp.readlines() + + valid_image_count = 0 + valid_ann_count = 0 + + N = len(lines) + monitor_gap = max(1, N // 100) + for idx, line in enumerate(lines): + asset_path, annotation_path = line.strip().split() + if os.path.isfile(asset_path): + valid_image_count += 1 + + if os.path.isfile(annotation_path): + valid_ann_count += 1 + + # use `monitor.write_monitor_logger` to write write task process percent to monitor.txt + if idx % monitor_gap == 0: + monitor.write_monitor_logger(percent=0.2 * idx / N) + + logging.info(f'total image-ann pair: {N}') + logging.info(f'valid images: {valid_image_count}') + logging.info(f'valid annotations: {valid_ann_count}') + + # use `monitor.write_monitor_logger` to write write task process percent to monitor.txt + monitor.write_monitor_logger(percent=0.2) + + # suppose we have a long time training, and have saved the final model + # model output dir: os.path.join(cfg.ymir.output.models_dir, your_stage_name) + stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch10') + os.makedirs(stage_dir, exist_ok=True) + with open(os.path.join(stage_dir, 'epoch10.pt'), 'w') as f: + f.write('fake model weight') + with open(os.path.join(stage_dir, 'config.py'), 'w') as f: + f.write('fake model config file') + # use `rw.write_model_stage` to save training result + rw.write_model_stage(stage_name='epoch10', + files=['epoch10.pt', 'config.py'], + evaluation_result=dict(mIoU=random.random() / 2)) + + _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + + write_tensorboard_log(cfg.ymir.output.tensorboard_dir) + + stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch20') + os.makedirs(stage_dir, exist_ok=True) + with open(os.path.join(stage_dir, 'epoch20.pt'), 'w') as f: + f.write('fake model weight') + with open(os.path.join(stage_dir, 'config.py'), 'w') as f: + f.write('fake model config file') + rw.write_model_stage(stage_name='epoch20', + files=['epoch20.pt', 'config.py'], + evaluation_result=dict(mIoU=expected_miou)) + + # if task done, write 100% percent log + logging.info('training done') + monitor.write_monitor_logger(percent=1.0) + + +def _run_mining(cfg: edict) -> None: + # use `cfg.param` to get config file for training + # pretrained models in `cfg.ymir.input.models_dir` + gpu_id: str = cfg.param.get(key='gpu_id') + class_names: List[str] = cfg.param.get(key='class_names') + idle_seconds: float = cfg.param.get('idle_seconds', 60) + trigger_crash: bool = cfg.param.get('trigger_crash', False) + # use `logging` or `print` to write log to console + logging.info(f"mining config: {cfg.param}") + logging.info(f'gpu device: {gpu_id}') + logging.info(f'dataset class names: {class_names}') + + # use `cfg.input.candidate_index_file` to read candidate dataset items + # note that 
annotations path will be empty str if there's no annotations in that dataset + # count for image files + with open(cfg.ymir.input.candidate_index_file, 'r') as fp: + lines = fp.readlines() + + valid_images = [] + valid_image_count = 0 + for line in lines: + if os.path.isfile(line.strip()): + valid_image_count += 1 + valid_images.append(line.strip()) + + # use `monitor.write_monitor_logger` to write task process to monitor.txt + logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") + monitor.write_monitor_logger(percent=0.2) + + _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + + # write mining result + # here we give a fake score to each assets + total_length = len(valid_images) + mining_result = [(asset_path, index / total_length) for index, asset_path in enumerate(valid_images)] + rw.write_mining_result(mining_result=mining_result) + + # if task done, write 100% percent log + logging.info('mining done') + monitor.write_monitor_logger(percent=1.0) + + +def _run_infer(cfg: edict) -> None: + # use `cfg.param` to get config file for training + # models are transfered in `cfg.ymir.input.models_dir` model_params_path + class_names = cfg.param.get('class_names') + idle_seconds: float = cfg.param.get('idle_seconds', 60) + trigger_crash: bool = cfg.param.get('trigger_crash', False) + seed: int = cfg.param.get('seed', 15) + # use `logging` or `print` to write log to console + logging.info(f"infer config: {cfg.param}") + + # use `cfg.ymir.input.candidate_index_file` to read candidate dataset items + # note that annotations path will be empty str if there's no annotations in that dataset + with open(cfg.ymir.input.candidate_index_file, 'r') as fp: + lines = fp.readlines() + + valid_images = [] + invalid_images = [] + valid_image_count = 0 + for line in lines: + if os.path.isfile(line.strip()): + valid_image_count += 1 + valid_images.append(line.strip()) + else: + invalid_images.append(line.strip()) + + # use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt + logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") + monitor.write_monitor_logger(percent=0.2) + + _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + + # write infer result + fake_anns = [] + random.seed(seed) + for class_name in class_names: + x = random.randint(0, 100) + y = random.randint(0, 100) + w = random.randint(50, 100) + h = random.randint(50, 100) + ann = rw.Annotation(class_name=class_name, score=random.random(), box=rw.Box(x=x, y=y, w=w, h=h)) + + fake_anns.append(ann) + + infer_result = {asset_path: fake_anns for asset_path in valid_images} + for asset_path in invalid_images: + infer_result[asset_path] = [] + rw.write_infer_result(infer_result=infer_result) + + # if task done, write 100% percent log + logging.info('infer done') + monitor.write_monitor_logger(percent=1.0) + + +def _dummy_work(idle_seconds: float, trigger_crash: bool = False, gpu_memory_size: int = 0) -> None: + if idle_seconds > 0: + time.sleep(idle_seconds) + if trigger_crash: + raise RuntimeError('app crashed') + + +def write_tensorboard_log(tensorboard_dir: str) -> None: + tb_log = SummaryWriter(tensorboard_dir) + + total_epoch = 30 + for e in range(total_epoch): + tb_log.add_scalar("fake_loss", 10 / (1 + e), e) + time.sleep(1) + monitor.write_monitor_logger(percent=e / total_epoch) + + +if __name__ == '__main__': + logging.basicConfig(stream=sys.stdout, + format='%(levelname)-8s: [%(asctime)s] %(message)s', + 
datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) + sys.exit(start()) diff --git a/seg-semantic-demo-tmi/img-man/infer-template.yaml b/seg-semantic-demo-tmi/img-man/infer-template.yaml new file mode 100644 index 0000000..67295db --- /dev/null +++ b/seg-semantic-demo-tmi/img-man/infer-template.yaml @@ -0,0 +1,12 @@ +# infer template for your executor app +# after build image, it should at /img-man/infer-template.yaml +# key: gpu_id, task_id, model_params_path, class_names, gpu_count should be preserved + +# gpu_id: '0' +# gpu_count: 1 +# task_id: 'default-infer-task' +# model_params_path: [] +# class_names: [] + +# just for test, remove this key in your own docker image +idle_seconds: 3 # idle seconds for each task diff --git a/seg-semantic-demo-tmi/img-man/manifest.yaml b/seg-semantic-demo-tmi/img-man/manifest.yaml new file mode 100644 index 0000000..633c2b2 --- /dev/null +++ b/seg-semantic-demo-tmi/img-man/manifest.yaml @@ -0,0 +1,2 @@ +# object_type: 2 if this docker image is training, mining or infer for detection, 3 for semantic segmentation, default: 2 +"object_type": 3 diff --git a/seg-semantic-demo-tmi/img-man/mining-template.yaml b/seg-semantic-demo-tmi/img-man/mining-template.yaml new file mode 100644 index 0000000..3eae941 --- /dev/null +++ b/seg-semantic-demo-tmi/img-man/mining-template.yaml @@ -0,0 +1,12 @@ +# mining template for your executor app +# after build image, it should at /img-man/mining-template.yaml +# key: gpu_id, task_id, model_params_path, class_names, gpu_count should be preserved + +# gpu_id: '0' +# gpu_count: 1 +# task_id: 'default-mining-task' +# model_params_path: [] +# class_names: [] + +# just for test, remove this key in your own docker image +idle_seconds: 3 # idle seconds for each task diff --git a/seg-semantic-demo-tmi/img-man/training-template.yaml b/seg-semantic-demo-tmi/img-man/training-template.yaml new file mode 100644 index 0000000..5f2b638 --- /dev/null +++ b/seg-semantic-demo-tmi/img-man/training-template.yaml @@ -0,0 +1,18 @@ +# training template for your executor app +# after build image, it should at /img-man/training-template.yaml +# key: gpu_id, task_id, pretrained_model_paths, class_names, gpu_count should be preserved + +# gpu_id: '0' +# gpu_count: 1 +# task_id: 'default-training-task' +# pretrained_model_params: [] +# class_names: [] + +# format of annotations and images that ymir should provide to this docker container +# annotation format: must be seg-coco +# image format: must be raw +export_format: 'seg-coco:raw' + +# just for test, remove this key in your own docker image +expected_miou: 0.983 # expected mIoU for training task +idle_seconds: 3 # idle seconds for each task diff --git a/seg-semantic-demo-tmi/requirements.txt b/seg-semantic-demo-tmi/requirements.txt new file mode 100644 index 0000000..20103d3 --- /dev/null +++ b/seg-semantic-demo-tmi/requirements.txt @@ -0,0 +1,4 @@ +pydantic>=1.8.2 +pyyaml>=5.4.1 +tensorboardX>=2.4 +#ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0 From 8de25b9c8de1974ef2154e6108113243f4803adb Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 3 Jan 2023 18:40:08 +0800 Subject: [PATCH 177/204] update doc --- det-mmdetection-tmi/infer-template.yaml | 1 - det-mmdetection-tmi/mining-template.yaml | 2 - det-yolov5-tmi/ymir/README.md | 7 +- docs/cn/docker_images/det-detectron2-tmi.md | 101 ++++++++++++++++++++ docs/cn/docker_images/det-mmdet-tmi.md | 89 +++++++++++++++++ docs/cn/docker_images/det-nanodet-tmi.md | 87 +++++++++++++++++ docs/cn/docker_images/det-vidt-tmi.md | 65 
+++++++++++++ docs/cn/docker_images/det-yolov4-tmi.md | 67 +++++++++++++ docs/cn/docker_images/det-yolov5-tmi.md | 74 ++++++++++++++ docs/cn/docker_images/det-yolov7-tmi.md | 76 +++++++++++++++ 10 files changed, 562 insertions(+), 7 deletions(-) create mode 100644 docs/cn/docker_images/det-vidt-tmi.md diff --git a/det-mmdetection-tmi/infer-template.yaml b/det-mmdetection-tmi/infer-template.yaml index de78f9c..80967de 100644 --- a/det-mmdetection-tmi/infer-template.yaml +++ b/det-mmdetection-tmi/infer-template.yaml @@ -1,4 +1,3 @@ shm_size: '128G' -export_format: 'ark:raw' cfg_options: '' conf_threshold: 0.2 diff --git a/det-mmdetection-tmi/mining-template.yaml b/det-mmdetection-tmi/mining-template.yaml index 693463b..543b6c7 100644 --- a/det-mmdetection-tmi/mining-template.yaml +++ b/det-mmdetection-tmi/mining-template.yaml @@ -1,5 +1,3 @@ shm_size: '128G' -export_format: 'ark:raw' -cfg_options: '' mining_algorithm: cald class_distribution_scores: '' # 1.0,1.0,0.1,0.2 diff --git a/det-yolov5-tmi/ymir/README.md b/det-yolov5-tmi/ymir/README.md index 6343675..7ab2d25 100644 --- a/det-yolov5-tmi/ymir/README.md +++ b/det-yolov5-tmi/ymir/README.md @@ -4,11 +4,11 @@ update 2022/11/23 ## build your ymir-executor ``` -docker build -t your/ymir-executor:ymir2.0.0-cuda102-yolov5-tmi -f cuda102.dockerfile . +cd det-yolov5-tmi -docker build -t your/ymir-executor:ymir2.0.0-cuda111-yolov5-tmi -f cuda111.dockerfile . +docker build -t your/ymir-executor:ymir2.0.0-cuda102-yolov5-tmi -f ymir/docker/cuda102.dockerfile . -docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile . +docker build -t your/ymir-executor:ymir2.0.0-cuda111-yolov5-tmi -f ymir/docker/cuda111.dockerfile . ``` ## 训练: training @@ -56,7 +56,6 @@ docker build -t your/ymir-executor:ymir2.0.0-yolov5-cpu-tmi -f cpu.dockerfile . | save_period | 10 | 整数 | 保存模型的间隔 | 建议:当save_best_only为False时,可保存 `epoch/save_period` 个中间结果 | sync_bn | False | 布尔型 | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 | | activate | '' | 字符串 | 激活函数,默认为nn.Hardswish(), 参考 [pytorch激活函数](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) | 可选值: ELU, Hardswish, LeakyReLU, PReLU, ReLU, ReLU6, SiLU, ... | -| ymir_saved_file_patterns | '' | 字符串 | 用 `,` 分隔的保存文件模式 | 建议:专业用户当希望过滤保存的文件以节省空间时,可设置配置的正则表达式 | ### 训练结果文件示例 ``` diff --git a/docs/cn/docker_images/det-detectron2-tmi.md b/docs/cn/docker_images/det-detectron2-tmi.md index e69de29..24386ed 100644 --- a/docs/cn/docker_images/det-detectron2-tmi.md +++ b/docs/cn/docker_images/det-detectron2-tmi.md @@ -0,0 +1,101 @@ +# detectron2 镜像说明文档 + +## 代码仓库 + +> 参考[facebook/detectron2](https://github.com/facebookresearch/detectron2) +- [modelai/ymir-detectron2](https://github.com/modelai/ymir-detectron2) + +## 镜像地址 +``` +youdaoyzbx/ymir-exectutor:ymir2.0.0-detectron2-cu111-tmi +``` + +## 性能表现 + +> 数据参考[detectron2/Model Zoo](https://github.com/facebookresearch/detectron2/blob/main/MODEL_ZOO.md) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
+| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
+| R50 | 1x | 0.205 | 0.041 | 4.1 | 37.4 | 190397773 | model \| metrics |
+| R50 | 3x | 0.205 | 0.041 | 4.1 | 38.7 | 190397829 | model \| metrics |
+| R101 | 3x | 0.291 | 0.054 | 5.2 | 40.4 | 190397697 | model \| metrics |
+
+
+## 训练参数
+
+| 超参数 | 默认值 | 类型 | 说明 | 建议 |
+| - | - | - | - | - |
+| hyper-parameter | default value | type | note | advice |
+| batch_size | 2 | 整数 | batch size 大小 | - |
+| config_file | configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml | 文件路径 | 配置文件路径 | 参考 [configs/COCO-Detection](https://github.com/modelai/ymir-detectron2/tree/ymir/configs/COCO-Detection) |
+| max_iter | 90000 | 整数 | 最大训练次数 | - |
+| learning_rate | 0.001 | 浮点数 | 学习率 | - |
+| args_options | '' | 字符串 | 命令行参数 | 参考 [default_argument_parser](https://github.com/modelai/ymir-detectron2/blob/ymir/detectron2/engine/defaults.py) |
+
+## 推理参数
+
+| 超参数 | 默认值 | 类型 | 说明 | 建议 |
+| - | - | - | - | - |
+| hyper-parameter | default value | type | note | advice |
+| conf_threshold | 0.2 | 浮点数 | 置信度阈值 | 采用默认值 |
+
+## 挖掘参数
+
+| 超参数 | 默认值 | 类型 | 说明 | 建议 |
+| - | - | - | - | - |
+| hyper-parameter | default value | type | note | advice |
+| conf_threshold | 0.2 | 浮点数 | 置信度阈值 | 采用默认值 |
+
+
+## 引用
+```
+@misc{wu2019detectron2,
+  author =       {Yuxin Wu and Alexander Kirillov and Francisco Massa and
+                  Wan-Yen Lo and Ross Girshick},
+  title =        {Detectron2},
+  howpublished = {\url{https://github.com/facebookresearch/detectron2}},
+  year =         {2019}
+}
+```
diff --git a/docs/cn/docker_images/det-mmdet-tmi.md b/docs/cn/docker_images/det-mmdet-tmi.md
index e69de29..add8a0c 100644
--- a/docs/cn/docker_images/det-mmdet-tmi.md
+++ b/docs/cn/docker_images/det-mmdet-tmi.md
@@ -0,0 +1,89 @@
+# mmdetection 镜像说明文档
+
+## 仓库地址
+
+> 参考[mmdetection](https://github.com/open-mmlab/mmdetection)
+
+- [det-mmdetection-tmi](https://github.com/modelai/ymir-executor-fork/det-mmdetection-tmi)
+
+## 镜像地址
+```
+youdaoyzbx/ymir-executor:ymir2.0.0-mmdet-cu111-tmi
+```
+
+## 性能表现
+
+> 参考[mmdetection官方数据](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox/README.md)
+
+| Backbone | size | Mem (GB) | box AP | Config | Download |
+| :--------: | :--: | :------: | :----: | :--------: | :--------: |
+| YOLOX-tiny | 416 | 3.5 | 32.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_tiny_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234.log.json) |
+| YOLOX-s | 640 | 7.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_s_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711.log.json) |
+| YOLOX-l | 640 | 19.9 | 49.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_l_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth) \|
[log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236.log.json) | +| YOLOX-x | 640 | 28.1 | 50.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_x_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254.log.json) | + +**说明**: + +1. The test score threshold is 0.001, and the box AP indicates the best AP. +2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information. +3. We also trained the model by the official release of YOLOX based on [Megvii-BaseDetection/YOLOX#735](https://github.com/Megvii-BaseDetection/YOLOX/issues/735) with commit ID [38c633](https://github.com/Megvii-BaseDetection/YOLOX/tree/38c633bf176462ee42b110c70e4ffe17b5753208). We found that the best AP of `YOLOX-tiny`, `YOLOX-s`, `YOLOX-l`, and `YOLOX-x` is 31.8, 40.3, 49.2, and 50.9, respectively. The performance is consistent with that of our re-implementation (see Table above) but still has a gap (0.3~0.8 AP) in comparison with the reported performance in their [README](https://github.com/Megvii-BaseDetection/YOLOX/blob/38c633bf176462ee42b110c70e4ffe17b5753208/README.md#benchmark). + + +## 训练参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| config_file | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串| 受ymir后台处理,ymir数据集导出格式 | - | +| config_file | configs/yolox/yolox_tiny_8x8_300e_coco.py | 文件路径 | mmdetection配置文件 | 建议采用yolox系列, 参考[det-mmdetection-tmi/configs](https://github.com/modelai/ymir-executor-fork/tree/master/det-mmdetection-tmi/configs) | +| samples_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| max_epochs | 100 | 整数 | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 | +| args_options | '' | 字符串 | 训练命令行参数 | 参考 [det-mmdetection-tmi/tools/train.py](https://github.com/modelai/ymir-executor-fork/blob/master/det-mmdetection-tmi/tools/train.py) +| cfg_options | '' | 字符串 | 训练命令行参数 | 参考 [det-mmdetection-tmi/tools/train.py](https://github.com/modelai/ymir-executor-fork/blob/master/det-mmdetection-tmi/tools/train.py) +| metric | bbox | 字符串 | 模型评测方式 | 采用默认值即可 | +| val_interval | 1 | 整数 | 模型在验证集上评测的周期 | 设置为1,每个epoch可评测一次 | +| max_keep_checkpoints | 1 | 整数 | 最多保存的权重文件数量 | 设置为k, 可保存k个最优权重和k个最新的权重文件,设置为-1可保存所有权重文件。 + +**说明** +1. config_file 可查看[det-mmdetection-tmi/configs](https://github.com/modelai/ymir-executor-fork/tree/master/det-mmdetection-tmi/configs)进行选择 + + +## 推理参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| conf_threshold | 0.2 | 浮点数 | 推理结果置信度过滤阈值 | 设置为0可保存所有结果,设置为0.6可过滤大量结果 | +| cfg_options | '' | 字符串 | 训练命令行参数 | 参考 [det-mmdetection-tmi/tools/train.py](https://github.com/modelai/ymir-executor-fork/blob/master/det-mmdetection-tmi/tools/train.py) + +**说明** +1. 
由于没有采用批量推理技术,因此没有samples_per_gpu和workers_per_gpu选项 + + +## 挖掘参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| mining_algorithm | aldd | 字符串 | 挖掘算法可选 aldd, cald, entropy 和 random | 单类建议采用aldd, 多类检测建议采用entropy | +| cfg_options | '' | 字符串 | 训练命令行参数 | 参考 [det-mmdetection-tmi/tools/train.py](https://github.com/modelai/ymir-executor-fork/blob/master/det-mmdetection-tmi/tools/train.py) + +**说明** +1. class_distribution_scores 一些复杂的参数在此不做说明 +2. 由于没有采用批量推理技术,因此没有samples_per_gpu和workers_per_gpu选项 + +## 论文引用 + +```latex +@article{yolox2021, + title={{YOLOX}: Exceeding YOLO Series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` diff --git a/docs/cn/docker_images/det-nanodet-tmi.md b/docs/cn/docker_images/det-nanodet-tmi.md index e69de29..aa7ff93 100644 --- a/docs/cn/docker_images/det-nanodet-tmi.md +++ b/docs/cn/docker_images/det-nanodet-tmi.md @@ -0,0 +1,87 @@ +# nanodet 镜像说明文档 + +> Super fast and high accuracy lightweight anchor-free object detection model. Real-time on mobile devices. + +## 代码仓库 + +> 参考[RangiLyu/nanodet](https://github.com/RangiLyu/nanodet) +- [modelai/ymir-nanodet](https://github.com/modelai/ymir-nanodet) + +## 镜像地址 +``` +youdaoyzbx/ymir-executor:ymir2.0.0-nanodet-cu111-tmi +``` + +## 性能说明 + +> 参考[RangiLyu/nanodet](https://github.com/RangiLyu/nanodet) + +Model |Resolution| mAPval
0.5:0.95 |CPU Latency<br>(i7-8700) |ARM Latency
    (4xA76) | FLOPS | Params | Model Size +:-------------:|:--------:|:-------:|:--------------------:|:--------------------:|:----------:|:---------:|:-------: +NanoDet-m | 320*320 | 20.6 | **4.98ms** | **10.23ms** | **0.72G** | **0.95M** | **1.8MB(FP16)** | **980KB(INT8)** +**NanoDet-Plus-m** | 320*320 | **27.0** | **5.25ms** | **11.97ms** | **0.9G** | **1.17M** | **2.3MB(FP16)** | **1.2MB(INT8)** +**NanoDet-Plus-m** | 416*416 | **30.4** | **8.32ms** | **19.77ms** | **1.52G** | **1.17M** | **2.3MB(FP16)** | **1.2MB(INT8)** +**NanoDet-Plus-m-1.5x** | 320*320 | **29.9** | **7.21ms** | **15.90ms** | **1.75G** | **2.44M** | **4.7MB(FP16)** | **2.3MB(INT8)** +**NanoDet-Plus-m-1.5x** | 416*416 | **34.1** | **11.50ms** | **25.49ms** | **2.97G** | **2.44M** | **4.7MB(FP16)** | **2.3MB(INT8)** +YOLOv3-Tiny | 416*416 | 16.6 | - | 37.6ms | 5.62G | 8.86M | 33.7MB +YOLOv4-Tiny | 416*416 | 21.7 | - | 32.81ms | 6.96G | 6.06M | 23.0MB +YOLOX-Nano | 416*416 | 25.8 | - | 23.08ms | 1.08G | 0.91M | 1.8MB(FP16) +YOLOv5-n | 640*640 | 28.4 | - | 44.39ms | 4.5G | 1.9M | 3.8MB(FP16) +FBNetV5 | 320*640 | 30.4 | - | - | 1.8G | - | - +MobileDet | 320*320 | 25.6 | - | - | 0.9G | - | - + +***Download pre-trained models and find more models in [Model Zoo](#model-zoo) or in [Release Files](https://github.com/RangiLyu/nanodet/releases)*** + + +## 训练参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串| 受ymir后台处理,ymir数据集导出格式 | - | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| config_file | config/nanodet-plus-m_416.yml | 文件路径 | 配置文件路径 | 参考[config](https://github.com/modelai/ymir-nanodet/tree/ymir-dev/config) | +| epochs | 100 | 整数 | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 | +| input_size | -1 | 整数 | 输入模型的图像分辨率 | -1表示采用config_file中定义的图像大小 | +| learning_rate | -1 | 浮点数 | 学习率 | -1表示采用config_file中定义的学习率 +| resume | False | 布尔型 | 是否继续训练 | 设置为True可实现提前中断与继续训练功能 | +| load_from | '' | 文件路径 | 加载权重位置 | 设置后可加载指定位置的权重文件 | + + +## 推理参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| conf_thres | 0.35 | 浮点数 | 置信度阈值 | - | +| pin_memory | False | 布尔型 | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 | + + +## 挖掘参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| conf_thres | 0.35 | 浮点数 | 置信度阈值 | - | +| pin_memory | False | 布尔型 | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 | + +**说明** +1. 
nanodet仅支持aldd挖掘算法 + + +## 引用 + +``` +@misc{=nanodet, + title={NanoDet-Plus: Super fast and high accuracy lightweight anchor-free object detection model.}, + author={RangiLyu}, + howpublished = {\url{https://github.com/RangiLyu/nanodet}}, + year={2021} +} +``` diff --git a/docs/cn/docker_images/det-vidt-tmi.md b/docs/cn/docker_images/det-vidt-tmi.md new file mode 100644 index 0000000..9b47750 --- /dev/null +++ b/docs/cn/docker_images/det-vidt-tmi.md @@ -0,0 +1,65 @@ +# vidt 镜像说明文档 + +ICLR 2022的 transformer 架构检测器 + +## 代码仓库 + +> 参考[naver-ai/vidt](https://github.com/naver-ai/vidt) +- [modelai/ymir-vidt](https://github.com/modelai/ymir-vidt) + +## 镜像地址 +``` +youdaoyzbx/ymir-executor:ymir2.0.0-vidt-cu111-tmi +``` + +## 性能表现 + +> 数据参考[naver-ai/vidt](https://github.com/naver-ai/vidt) + +| Backbone | Epochs | AP | AP50 | AP75 | AP_S | AP_M | AP_L | Params | FPS | Checkpoint / Log | +| :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | :-----: | +| `Swin-nano` | 50 (150) | 40.4 (42.6) | 59.9 (62.2) | 43.0 (45.7) | 23.1 (24.9) | 42.8 (45.4) | 55.9 (59.1) | 16M | 20.0 | [Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_nano_50.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_nano_50.txt)
([Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_nano_150.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_nano_150.txt))|
+| `Swin-tiny` | 50 (150)| 44.9 (47.2) | 64.7 (66.7) | 48.3 (51.4) | 27.5 (28.4) | 47.9 (50.2) | 61.9 (64.7) | 38M | 17.2 | [Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_tiny_50.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_tiny_50.txt)<br>([Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_tiny_150.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_tiny_150.txt))|
+| `Swin-small` | 50 (150) | 47.4 (48.8) | 67.7 (68.8) | 51.2 (53.0) | 30.4 (30.7) | 50.7 (52.0) | 64.6 (65.9) | 60M | 12.1 | [Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_small_50.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_small_50.txt)<br>([Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_small_150.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_small_150.txt))|
+| `Swin-base` | 50 (150) | 49.4 (50.4) | 69.6 (70.4) | 53.4 (54.8) | 31.6 (34.1) | 52.4 (54.2) | 66.8 (67.4) | 0.1B | 9.0 | [Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_base_50.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_base_50.txt)
    ([Github](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_base_150.pth) / [Log](https://github.com/naver-ai/vidt/releases/download/v0.1-vidt/vidt_base_150.txt)) | + + +## 训练参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串| 受ymir后台处理,ymir数据集导出格式 | - | +| backbone_name | swin_nano | 字符串 | 骨架网络,可选swin_nano, swin_tiny, swin_small, swin_base | - | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| num_workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| epochs | 50 | 整数 | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 | +| learning_rate | 0.0001 | 浮点数 | 学习率 | - | +| eval_size | 640 | 整数 | 输入网络的图片大小 | - | +| weight_save_interval | 100 | 整数 | 权重文件保存间隔 | - | +| args_options | '' | 字符串 | 命令行参数 | 参考 [get_args_parser](https://github.com/modelai/ymir-vidt/blob/ymir-dev/arguments.py) | + +## 推理参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| conf_threshold | 0.2 | 浮点数 | 置信度阈值 | 采用默认值 | + +## 挖掘参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| conf_threshold | 0.2 | 浮点数 | 置信度阈值 | 采用默认值 | + +## 引用 +``` +@inproceedings{song2022vidt, + title={ViDT: An Efficient and Effective Fully Transformer-based Object Detector}, + author={Song, Hwanjun and Sun, Deqing and Chun, Sanghyuk and Jampani, Varun and Han, Dongyoon and Heo, Byeongho and Kim, Wonjae and Yang, Ming-Hsuan}, + booktitle={International Conference on Learning Representation}, + year={2022} +} +``` diff --git a/docs/cn/docker_images/det-yolov4-tmi.md b/docs/cn/docker_images/det-yolov4-tmi.md index e69de29..6cc88d7 100644 --- a/docs/cn/docker_images/det-yolov4-tmi.md +++ b/docs/cn/docker_images/det-yolov4-tmi.md @@ -0,0 +1,67 @@ +# yolov4 镜像说明文档 + +## 仓库地址 + +> 参考仓库 [AlexeyAB/darknet](https://github.com/AlexeyAB/darknet) +- [det-yolov4-tmi](https://github.com/modelai/ymir-executor-fork/tree/master/det-yolov4-tmi) + +## 镜像地址 +``` +youdaoyzbx/ymir-executor:ymir2.0.0-yolov4-cu112-tmi +``` + +## 性能表现 + +> 参考文档 [yolov4 model zoo](https://github.com/AlexeyAB/darknet/wiki/YOLOv4-model-zoo) + +| model | size | mAP@0.5:0.95 | mAP@0.5 | +| - | - | - | - | +| yolov4 | 608 | 43.5 | 65.7 | +| yolov4-Leaky | 608 | 42.9 | 65.3 | +| yolov4-Mish | 608 | 43.8 | 65.6 | + +## 训练参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串| 受ymir后台处理,ymir数据集导出格式 | - | +| image_height | 608 | 整数 | 输入网络的图像高度 | 采用 32的整数倍,如416, 512, 608 | +| image_width | 608 | 整数 | 输入网络的图像宽度 | 采用 32的整数倍,如416, 512, 608 | +| learning_rate | 0.0013 | 浮点数 | 学习率 | 采用默认值即可 | +| max_batches | 20000 | 整数 | 训练次数 | 如要减少训练时间,可减少max_batches | +| warmup_iterations | 1000 | 整数 | 预热训练次数 | 采用默认值即可 | +| batch | 64 | 整数 | 累计梯度的批处理大小,即batch size | 采用默认值即可 | +| subdivisions | 64 | 整数 | 累计梯度的次数 | 需要是batch参数的因数,如32。其中64表示一次加载一张图片,累计梯度64次;32表示一次加载两张图片,共累计32次。实际的batch size均为64。| + +**说明** +1. 
过于复杂的参数anchors不做说明,保持默认即可 + + +## 推理参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| image_height | 608 | 整数 | 输入网络的图像高度 | 采用 32的整数倍,如416, 512, 608 | +| image_width | 608 | 整数 | 输入网络的图像宽度 | 采用 32的整数倍,如416, 512, 608 | +| confidence_thresh | 0.1 | 浮点数 | 置信度阈值 | - | +| nms_thresh | 0.45 | 浮点数 | nms时的iou阈值 | - | +| max_boxes | 50 | 整数 | 每张图像最多检测的目标数量 | - | + +## 挖掘参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| data_workers | 28 | 整数 | 读取数据时使用的进程数量 | - | +| strategy | aldd_yolo | 字符串 | 挖掘算法 | - | +| image_height | 608 | 整数 | 输入网络的图像高度 | 采用 32的整数倍,如416, 512, 608 | +| image_width | 608 | 整数 | 输入网络的图像宽度 | 采用 32的整数倍,如416, 512, 608 | +| batch_size | 4 | 整数 | 批处理大小 | - | +| confidence_thresh | 0.1 | 浮点数 | 置信度阈值 | - | +| nms_thresh | 0.45 | 浮点数 | nms时的iou阈值 | - | +| max_boxes | 50 | 整数 | 每张图像最多检测的目标数量 | - | diff --git a/docs/cn/docker_images/det-yolov5-tmi.md b/docs/cn/docker_images/det-yolov5-tmi.md index e69de29..ece0b6a 100644 --- a/docs/cn/docker_images/det-yolov5-tmi.md +++ b/docs/cn/docker_images/det-yolov5-tmi.md @@ -0,0 +1,74 @@ +# yolov5 镜像说明文档 + +## 镜像地址 + +``` +youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu102-tmi +``` + +## 性能表现 + +|Model |size
(pixels) |mAPval<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs
    @640 (B) +|--- |--- |--- |--- |--- |--- |--- |--- |--- +|[YOLOv5n] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5** +|[YOLOv5s] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5 +|[YOLOv5m] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0 +|[YOLOv5l] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1 +|[YOLOv5x] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7 +| | | | | | | | | +|[YOLOv5n6] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6 +|[YOLOv5s6] |1280 |44.8 |63.7 |385 |8.2 |3.6 |16.8 |12.6 +|[YOLOv5m6] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0 +|[YOLOv5l6] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4 + +## 训练参数 + + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串| 受ymir后台处理,ymir数据集导出格式 | - | +| model | yolov5s | 字符串 | yolov5模型,可选yolov5n, yolov5s, yolov5m, yolov5l等 | 建议:速度快选yolov5n, 精度高选yolov5l, yolov5x, 平衡选yolov5s或yolov5m | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| num_workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| epochs | 100 | 整数 | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 | +| img_size | 640 | 整数 | 输入模型的图像分辨率 | - | +| opset | 11 | 整数 | onnx 导出参数 opset | 建议:一般不需要用到onnx,不必改 | +| args_options | '--exist-ok' | 字符串 | yolov5命令行参数 | 建议:专业用户可用yolov5所有命令行参数 | +| save_best_only | True | 布尔型 | 是否只保存最优模型 | 建议:为节省空间设为True即可 | +| save_period | 10 | 整数 | 保存模型的间隔 | 建议:当save_best_only为False时,可保存 `epoch/save_period` 个中间结果 +| sync_bn | False | 布尔型 | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 | +| activate | '' | 字符串 | 激活函数,默认为nn.Hardswish(), 参考 [pytorch激活函数](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) | 可选值: ELU, Hardswish, LeakyReLU, PReLU, ReLU, ReLU6, SiLU, ... | + +## 推理参数 + + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| img_size | 640 | 整数 | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 | +| conf_thres | 0.25 | 浮点数 | 置信度阈值 | 采用默认值 | +| iou_thres | 0.45 | 浮点数 | nms时的iou阈值 | 采用默认值 | +| batch_size_per_gpu | 16 | 整数| 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 | +| num_workers_per_gpu | 4 | 整数| 每张GPU对应的数据读取进程数 | - | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| pin_memory | False | 布尔型 | 是否为数据集单独固定内存? | 内存充足时改为True可加快数据集加载 | + + +## 挖掘参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| img_size | 640 | 整数 | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 | +| mining_algorithm | aldd | 字符串 | 挖掘算法名称,可选 random, aldd, cald, entropy | 建议单类检测采用aldd,多类检测采用entropy | +| class_distribution_scores | '' | List[float]的字符表示 | aldd算法的类别平衡参数 | 不用更改, 专业用户可根据类别占比进行调整,如对于4类检测,用 `1.0,1.0,0.1,0.2` 降低后两类的挖掘比重 | +| conf_thres | 0.25 | 浮点数 | 置信度阈值 | 采用默认值 | +| iou_thres | 0.45 | 浮点数 | nms时的iou阈值 | 采用默认值 | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加1倍加快训练速度 | +| num_workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| shm_size | 128G | 字符串 | 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| pin_memory | False | 布尔型 | 是否为数据集单独固定内存? 
| 内存充足时改为True可加快数据集加载 | + diff --git a/docs/cn/docker_images/det-yolov7-tmi.md b/docs/cn/docker_images/det-yolov7-tmi.md index e69de29..8a616d3 100644 --- a/docs/cn/docker_images/det-yolov7-tmi.md +++ b/docs/cn/docker_images/det-yolov7-tmi.md @@ -0,0 +1,76 @@ +# yolov7 镜像说明文档 + +## 代码仓库 + +> 参考[WongKinYiu/yolov7](https://github.com/WongKinYiu/yolov7) +- [modelai/ymir-yolov7](https://github.com/modelai/ymir-yolov7) + +## 镜像地址 + +``` +youdaoyzbx/ymir-executor:ymir2.0.0-yolov7-cu111-tmi +``` + +## 性能表现 + +> 数据参考[WongKinYiu/yolov7](https://github.com/WongKinYiu/yolov7) + +| Model | Test Size | APtest | AP50test | AP75test | batch 1 fps | batch 32 average time | +| :-- | :-: | :-: | :-: | :-: | :-: | :-: | +| [**YOLOv7**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) | 640 | **51.4%** | **69.7%** | **55.9%** | 161 *fps* | 2.8 *ms* | +| [**YOLOv7-X**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) | 640 | **53.1%** | **71.2%** | **57.8%** | 114 *fps* | 4.3 *ms* | +| | | | | | | | +| [**YOLOv7-W6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) | 1280 | **54.9%** | **72.6%** | **60.1%** | 84 *fps* | 7.6 *ms* | +| [**YOLOv7-E6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) | 1280 | **56.0%** | **73.5%** | **61.2%** | 56 *fps* | 12.3 *ms* | +| [**YOLOv7-D6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) | 1280 | **56.6%** | **74.0%** | **61.8%** | 44 *fps* | 15.0 *ms* | +| [**YOLOv7-E6E**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt) | 1280 | **56.8%** | **74.4%** | **62.1%** | 36 *fps* | 18.7 *ms* | + + +## 训练参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| export_format | ark:raw | 字符串| 受ymir后台处理,ymir数据集导出格式 | - | +| model | yolov5s | 字符串 | yolov5模型,可选yolov5n, yolov5s, yolov5m, yolov5l等 | 建议:速度快选yolov5n, 精度高选yolov5l, yolov5x, 平衡选yolov5s或yolov5m | +| batch_size_per_gpu | 16 | 整数 | 每张GPU一次处理的图片数量 | 建议大小:显存占用<50% 可增加2倍加快训练速度 | +| workers_per_gpu | 4 | 整数 | 每张GPU对应的数据读取进程数 | - | +| epochs | 100 | 整数 | 整个数据集的训练遍历次数 | 建议:必要时分析tensorboard确定是否有必要改变,一般采用默认值即可 | +| img_size | 640 | 整数 | 输入模型的图像分辨率 | - | +| args_options | '--exist-ok' | 字符串 | yolov5命令行参数 | 建议:专业用户可用yolov5所有命令行参数 | +| save_weight_file_num | 1 | 整数 | 保存最新模型的数量 | - | +| sync_bn | False | 布尔型 | 是否同步各gpu上的归一化层 | 建议:开启以提高训练稳定性及精度 | +| cfg_file | cfg/training/yolov7-tiny.yaml | 文件路径 | 模型文件路径, 对应 `--cfg` | 参考[cfg/training](https://github.com/modelai/ymir-yolov7/tree/ymir/cfg/training) | +| hyp_file | data/hyp.scratch.tiny.yaml | 文件路径 | 超参数文件路径,对应 `--hyp` | 参考[data](https://github.com/modelai/ymir-yolov7/tree/ymir/data) | +| cache_images | True | 布尔 | 是否缓存图像 | 设置为True可加快训练速度 | + + +## 推理参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| img_size | 640 | 整数 | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 | +| conf_thres | 0.25 | 浮点数 | 置信度阈值 | 采用默认值 | +| iou_thres | 0.45 | 浮点数 | nms时的iou阈值 | 采用默认值 | + +## 挖掘参数 + +| 超参数 | 默认值 | 类型 | 说明 | 建议 | +| - | - | - | - | - | +| hyper-parameter | default value | type | note | advice | +| shm_size | 128G | 字符串| 受ymir后台处理,docker image 可用共享内存 | 建议大小:镜像占用GPU数 * 32G | +| img_size | 640 | 整数 | 模型的输入图像大小 | 采用32的整数倍,224 = 32*7 以上大小 | +| conf_thres | 0.25 | 浮点数 | 置信度阈值 | 采用默认值 | +| iou_thres | 0.45 | 浮点数 | nms时的iou阈值 | 采用默认值 | + +## 引用 +``` 
+@article{wang2022yolov7,
+  title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors},
+  author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
+  journal={arXiv preprint arXiv:2207.02696},
+  year={2022}
+}
+```

From 3464c516250b8dd24847901bef578e94bfef6d69 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 4 Jan 2023 10:28:43 +0800
Subject: [PATCH 178/204] update mmdet

---
 det-mmdetection-tmi/mining-template.yaml |  2 +-
 det-mmdetection-tmi/start.py             | 10 ++++-
 det-mmdetection-tmi/ymir_infer.py        | 53 ++++++++++++++++++------
 det-mmdetection-tmi/ymir_mining_aldd.py  |  5 ++-
 det-mmdetection-tmi/ymir_train.py        |  8 ++--
 docs/cn/docker_images/det-nanodet-tmi.md |  1 +
 docs/cn/docker_images/det-yolov5-tmi.md  |  6 +++
 7 files changed, 66 insertions(+), 19 deletions(-)

diff --git a/det-mmdetection-tmi/mining-template.yaml b/det-mmdetection-tmi/mining-template.yaml
index 543b6c7..4e05032 100644
--- a/det-mmdetection-tmi/mining-template.yaml
+++ b/det-mmdetection-tmi/mining-template.yaml
@@ -1,3 +1,3 @@
 shm_size: '128G'
-mining_algorithm: cald
+mining_algorithm: aldd
 class_distribution_scores: ''  # 1.0,1.0,0.1,0.2
diff --git a/det-mmdetection-tmi/start.py b/det-mmdetection-tmi/start.py
index 2911ef1..81e2174 100644
--- a/det-mmdetection-tmi/start.py
+++ b/det-mmdetection-tmi/start.py
@@ -53,7 +53,15 @@ def _run_mining(cfg: edict) -> None:
 
 
 def _run_infer() -> None:
-    command = 'python3 ymir_infer.py'
+    gpu_id: str = str(cfg.param.get('gpu_id', '0'))
+    gpu_count = len(gpu_id.split(','))
+
+    if gpu_count <= 1:
+        command = 'python3 ymir_infer.py'
+    else:
+        port = find_free_port()
+        command = f'python3 -m torch.distributed.launch --nproc_per_node {gpu_count} --master_port {port} ymir_infer.py'  # noqa
+
     logging.info(f'start infer: {command}')
     subprocess.run(command.split(), check=True)
     logging.info("infer finished")
diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py
index 6d1f1ae..d136f08 100644
--- a/det-mmdetection-tmi/ymir_infer.py
+++ b/det-mmdetection-tmi/ymir_infer.py
@@ -1,4 +1,5 @@
 import argparse
+import os
 import os.path as osp
 import sys
 import warnings
@@ -6,15 +7,21 @@
 
 import cv2
 import numpy as np
+import torch.distributed as dist
 from easydict import EasyDict as edict
 from mmcv import DictAction
-from mmdet.apis import inference_detector, init_detector
-from mmdet.utils.util_ymir import get_best_weight_file
+from mmcv.runner import init_dist
 from tqdm import tqdm
-from ymir_exc import dataset_reader as dr
-from ymir_exc import env
 from ymir_exc import result_writer as rw
-from ymir_exc.util import YmirStage, get_merged_config, write_ymir_monitor_process
+from ymir_exc.util import (YmirStage, get_merged_config, write_ymir_monitor_process)
+
+from mmdet.apis import inference_detector, init_detector
+from mmdet.apis.test import collect_results_gpu
+from mmdet.utils.util_ymir import get_best_weight_file
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
 
 
 def parse_option(cfg_options: str) -> dict:
@@ -80,8 +87,9 @@ def __init__(self, cfg: edict):
         cfg_options = parse_option(options) if options else None
 
         # current infer can only use one gpu!!!
-        gpu_ids = cfg.param.get('gpu_id', '0')
-        gpu_id = gpu_ids.split(',')[0]
+        # gpu_ids = cfg.param.get('gpu_id', '0')
+        # gpu_id = gpu_ids.split(',')[0]
+        gpu_id = max(0, RANK)
 
         # build the model from a config file and a checkpoint file
         self.model = init_detector(config_file, checkpoint_file, device=f'cuda:{gpu_id}', cfg_options=cfg_options)
@@ -90,26 +98,47 @@ def infer(self, img):
 
 
 def main():
+    if LOCAL_RANK != -1:
+        init_dist(launcher='pytorch', backend="nccl" if dist.is_nccl_available() else "gloo")
+
     cfg = get_merged_config()
 
-    N = dr.items_count(env.DatasetType.CANDIDATE)
+    with open(cfg.ymir.input.candidate_index_file, 'r') as f:
+        images = [line.strip() for line in f.readlines()]
+
+    max_barrier_times = len(images) // WORLD_SIZE
+    if RANK == -1:
+        N = len(images)
+        tbar = tqdm(images)
+    else:
+        images_rank = images[RANK::WORLD_SIZE]
+        N = len(images_rank)
+        if RANK == 0:
+            tbar = tqdm(images_rank)
+        else:
+            tbar = images_rank
     infer_result = dict()
     model = YmirModel(cfg)
-    idx = -1
 
     # write infer result
     monitor_gap = max(1, N // 100)
     conf_threshold = float(cfg.param.conf_threshold)
-    for asset_path, _ in tqdm(dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)):
+    for idx, asset_path in enumerate(tbar):
         img = cv2.imread(asset_path)
         result = model.infer(img)
         raw_anns = mmdet_result_to_ymir(result, cfg.param.class_names)
 
+        # batch-level sync, avoid 30min time-out error
+        if WORLD_SIZE > 1 and idx < max_barrier_times:
+            dist.barrier()
+
         infer_result[asset_path] = [ann for ann in raw_anns if ann.score >= conf_threshold]
-        idx += 1
 
         if idx % monitor_gap == 0:
-            write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=idx / N, stage = YmirStage.TASK)
+            write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=idx / N, stage=YmirStage.TASK)
 
+    if WORLD_SIZE > 1:
+        infer_result = collect_results_gpu(infer_result, len(images))
+
     rw.write_infer_result(infer_result=infer_result)
     write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=1.0, stage=YmirStage.POSTPROCESS)
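编辑者注:上面 ymir_infer.py 的改动把候选图像按 rank 切片,每个 GPU 进程只推理自己的分片,循环内用 dist.barrier() 做批级同步以避免超时,最后由 collect_results_gpu 汇总结果。下面给出一个脱离 mmdet 的最小示意(编辑者补充的假设性代码,仅说明切片模式,假定 torch.distributed 已由启动器初始化):

```python
# 示意:torch.distributed.launch 场景下按 rank 切分推理数据(编辑者补充)
import os

RANK = int(os.getenv('RANK', -1))          # 由 torch.distributed.launch 注入
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))


def shard_for_rank(images):
    """单进程(RANK == -1)处理全部图像;多进程时第 r 个进程处理 images[r::WORLD_SIZE]"""
    if RANK == -1:
        return images
    return images[RANK::WORLD_SIZE]


# 用法示意:每个进程只推理自己的分片,结果再由 rank 0 进程收集并写出
# results = {path: infer_one(path) for path in shard_for_rank(all_images)}
```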
diff --git a/det-mmdetection-tmi/ymir_mining_aldd.py b/det-mmdetection-tmi/ymir_mining_aldd.py
index 51b5c13..59eea4b 100644
--- a/det-mmdetection-tmi/ymir_mining_aldd.py
+++ b/det-mmdetection-tmi/ymir_mining_aldd.py
@@ -2,12 +2,13 @@
 
 import torch
 from easydict import EasyDict as edict
-from mining_base import ALDDMining
 from mmcv.parallel import collate, scatter
+from ymir_exc.util import get_merged_config
+
+from mining_base import ALDDMining
 from mmdet.datasets import replace_ImageToTensor
 from mmdet.datasets.pipelines import Compose
 from mmdet.models.detectors import YOLOX
-from ymir_exc.util import get_merged_config
 from ymir_infer import YmirModel
 from ymir_mining_random import RandomMiner
diff --git a/det-mmdetection-tmi/ymir_train.py b/det-mmdetection-tmi/ymir_train.py
index b71595f..b06d882 100644
--- a/det-mmdetection-tmi/ymir_train.py
+++ b/det-mmdetection-tmi/ymir_train.py
@@ -5,9 +5,11 @@
 import sys
 
 from easydict import EasyDict as edict
-from mmdet.utils.util_ymir import get_best_weight_file, write_ymir_training_result
-from ymir_exc import monitor
-from ymir_exc.util import YmirStage, find_free_port, get_merged_config, write_ymir_monitor_process
+from ymir_exc.util import (YmirStage, find_free_port, get_merged_config,
+                           write_ymir_monitor_process)
+
+from mmdet.utils.util_ymir import (get_best_weight_file,
+                                   write_ymir_training_result)
 
 
 def main(cfg: edict) -> int:
diff --git a/docs/cn/docker_images/det-nanodet-tmi.md b/docs/cn/docker_images/det-nanodet-tmi.md
index aa7ff93..8d4ce81 100644
---
a/docs/cn/docker_images/det-nanodet-tmi.md
+++ b/docs/cn/docker_images/det-nanodet-tmi.md
@@ -10,6 +10,7 @@
 ## 镜像地址
 ```
 youdaoyzbx/ymir-executor:ymir2.0.0-nanodet-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.2-nanodet-cu111-tmi
 ```
 ## 性能说明
diff --git a/docs/cn/docker_images/det-yolov5-tmi.md b/docs/cn/docker_images/det-yolov5-tmi.md
index ece0b6a..19a4e27 100644
--- a/docs/cn/docker_images/det-yolov5-tmi.md
+++ b/docs/cn/docker_images/det-yolov5-tmi.md
@@ -1,5 +1,11 @@
 # yolov5 镜像说明文档
+
+## 仓库地址
+
+> 参考[ultralytics/yolov5](https://github.com/ultralytics/yolov5)
+- [modelai/ymir-executor-fork](https://github.com/modelai/ymir-executor-fork/tree/master/det-yolov5-tmi)
+
 ## 镜像地址
 
 ```

From f32e66aba4d7ccd4135ed4be8310596d253c6b22 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 4 Jan 2023 14:53:58 +0800
Subject: [PATCH 179/204] update for ddp infer support

---
 det-mmdetection-tmi/ymir_infer.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py
index d136f08..3458497 100644
--- a/det-mmdetection-tmi/ymir_infer.py
+++ b/det-mmdetection-tmi/ymir_infer.py
@@ -117,7 +117,7 @@ def main():
         tbar = tqdm(images_rank)
     else:
         tbar = images_rank
-    infer_result = dict()
+    infer_result_list = []
     model = YmirModel(cfg)
 
     # write infer result
@@ -132,16 +132,19 @@ def main():
         if WORLD_SIZE > 1 and idx < max_barrier_times:
             dist.barrier()
 
-        infer_result[asset_path] = [ann for ann in raw_anns if ann.score >= conf_threshold]
+        infer_result_list.append((asset_path, [ann for ann in raw_anns if ann.score >= conf_threshold]))
         if idx % monitor_gap == 0:
             write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=idx / N, stage=YmirStage.TASK)
 
     if WORLD_SIZE > 1:
-        infer_result = collect_results_gpu(infer_result, len(images))
+        dist.barrier()
+        infer_result_list = collect_results_gpu(infer_result_list, len(images))
 
-    rw.write_infer_result(infer_result=infer_result)
-    write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=1.0, stage=YmirStage.POSTPROCESS)
+    if RANK in [0, -1]:
+        infer_result_dict = {k: v for k, v in infer_result_list}
+        rw.write_infer_result(infer_result=infer_result_dict)
+        write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=1.0, stage=YmirStage.POSTPROCESS)
     return 0

From d1f719c07f70ca4f4d4985f3acd721fb942c8be8 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 4 Jan 2023 15:14:09 +0800
Subject: [PATCH 180/204] update docs

---
 README.MD                              | 2 +-
 README_zh-CN.MD                        | 2 +-
 docs/cn/README.MD                      | 4 ++++
 docs/cn/docker_images/det-mmdet-tmi.md | 1 +
 4 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/README.MD b/README.MD
index f4255b2..1155c50 100644
--- a/README.MD
+++ b/README.MD
@@ -4,7 +4,7 @@
 
 - [bilibili: video tutorial](https://b23.tv/KS5b5oF)
 
-- [wiki](https://github.com/modelai/ymir-executor-fork/wiki)
+- [Image Community](http://pubimg.vesionbook.com:8110/img) search for and share open-source ymir images.
- [ymir executor](./docs/official-docker-image.md) diff --git a/README_zh-CN.MD b/README_zh-CN.MD index 214fc06..12ba47e 100644 --- a/README_zh-CN.MD +++ b/README_zh-CN.MD @@ -4,7 +4,7 @@ - [bilibili 视频教程](https://b23.tv/KS5b5oF) -- [说明文档](https://github.com/modelai/ymir-executor-fork/wiki) +- [镜像社区](http://pubimg.vesionbook.com:8110/img) 可搜索到所有公开的ymir算法镜像, 同时可共享其他人发布的镜像。 - [ymir镜像](./docs/official-docker-image.md) diff --git a/docs/cn/README.MD b/docs/cn/README.MD index 7784a09..9283d78 100644 --- a/docs/cn/README.MD +++ b/docs/cn/README.MD @@ -1,3 +1,7 @@ # 中文说明文档 +此处存放中文文档 +## 镜像社区 + +- [镜像社区](http://pubimg.vesionbook.com:8110/img) 可搜索到所有公开的ymir算法镜像, 同时可共享其他人发布的镜像。 diff --git a/docs/cn/docker_images/det-mmdet-tmi.md b/docs/cn/docker_images/det-mmdet-tmi.md index add8a0c..0f3fc35 100644 --- a/docs/cn/docker_images/det-mmdet-tmi.md +++ b/docs/cn/docker_images/det-mmdet-tmi.md @@ -9,6 +9,7 @@ ## 镜像地址 ``` youdaoyzbx/ymir-executor:ymir2.0.0-mmdet-cu111-tmi +youdaoyzbx/ymir-executor:ymir2.0.2-mmdet-cu111-tmi ``` ## 性能表现 From d4ecccace5ab28a32a8f2989341f49328a8d1191 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Tue, 10 Jan 2023 18:28:39 +0800 Subject: [PATCH 181/204] update for det-demo-tmi --- det-demo-tmi/Dockerfile | 12 ++++-- det-demo-tmi/app/start.py | 31 +++++++--------- det-demo-tmi/img-man/infer-template.yaml | 8 ++-- det-demo-tmi/img-man/manifest.yaml | 2 + det-demo-tmi/img-man/mining-template.yaml | 8 ++-- det-demo-tmi/img-man/training-template.yaml | 8 ++-- det-demo-tmi/requirements.txt | 3 +- det-mmdetection-tmi/ymir_infer.py | 9 +++-- .../cn/docker_images/det-yolov5-automl-tmi.md | 37 +++++++++++++++++++ 9 files changed, 79 insertions(+), 39 deletions(-) create mode 100644 det-demo-tmi/img-man/manifest.yaml create mode 100644 docs/cn/docker_images/det-yolov5-automl-tmi.md diff --git a/det-demo-tmi/Dockerfile b/det-demo-tmi/Dockerfile index 9a742a9..0e4918c 100644 --- a/det-demo-tmi/Dockerfile +++ b/det-demo-tmi/Dockerfile @@ -5,21 +5,25 @@ FROM python:3.8.13-alpine # Add bash RUN apk add bash # Required to build numpy wheel -RUN apk add g++ +RUN apk add g++ git COPY requirements.txt ./ -RUN pip3 install -r requirements.txt +RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple WORKDIR /app # copy user code to WORKDIR COPY ./app/start.py /app/ -# copy user config template to /img-man +# copy user config template and manifest.yaml to /img-man RUN mkdir -p /img-man -COPY img-man/*-template.yaml /img-man/ +COPY img-man/*.yaml /img-man/ + +# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail +ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python # entry point for your app # the whole docker image will be started with `nvidia-docker run ` # and this command will run automatically + RUN echo "python /app/start.py" > /usr/bin/start.sh CMD bash /usr/bin/start.sh diff --git a/det-demo-tmi/app/start.py b/det-demo-tmi/app/start.py index 81eebf0..d961551 100644 --- a/det-demo-tmi/app/start.py +++ b/det-demo-tmi/app/start.py @@ -5,8 +5,6 @@ import time from typing import List -# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail -os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') from easydict import EasyDict as edict from tensorboardX import SummaryWriter from ymir_exc import monitor @@ -36,11 +34,11 @@ def _run_training(cfg: edict) -> None: 4. 
how to write training result """ # use `env.get_executor_config` to get config file for training - gpu_id: str = cfg.param.get(key='gpu_id') - class_names: List[str] = cfg.param.get(key='class_names') - expected_mAP: float = cfg.param.get(key='expected_map', default=0.6) - idle_seconds: float = cfg.param.get(key='idle_seconds', default=60) - trigger_crash: bool = cfg.param.get(key='trigger_crash', default=False) + gpu_id: str = cfg.param.get('gpu_id') + class_names: List[str] = cfg.param.get('class_names') + expected_mAP: float = cfg.param.get('expected_map') + idle_seconds: float = cfg.param.get('idle_seconds') + trigger_crash: bool = cfg.param.get('trigger_crash') # use `logging` or `print` to write log to console # notice that logging.basicConfig is invoked at executor.env logging.info(f'gpu device: {gpu_id}') @@ -76,12 +74,11 @@ def _run_training(cfg: edict) -> None: monitor.write_monitor_logger(percent=0.2) # suppose we have a long time training, and have saved the final model - # model output dir: os.path.join(cfg.ymir.output.models_dir, your_stage_name) - stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch10') - os.makedirs(stage_dir, exist_ok=True) - with open(os.path.join(stage_dir, 'epoch10.pt'), 'w') as f: + models_dir = cfg.ymir.output.models_dir + os.makedirs(models_dir, exist_ok=True) + with open(os.path.join(models_dir, 'epoch10.pt'), 'w') as f: f.write('fake model weight') - with open(os.path.join(stage_dir, 'config.py'), 'w') as f: + with open(os.path.join(models_dir, 'config.py'), 'w') as f: f.write('fake model config file') # use `rw.write_model_stage` to save training result rw.write_model_stage(stage_name='epoch10', files=['epoch10.pt', 'config.py'], mAP=random.random() / 2) @@ -90,11 +87,9 @@ def _run_training(cfg: edict) -> None: write_tensorboard_log(cfg.ymir.output.tensorboard_dir) - stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch20') - os.makedirs(stage_dir, exist_ok=True) - with open(os.path.join(stage_dir, 'epoch20.pt'), 'w') as f: + with open(os.path.join(models_dir, 'epoch20.pt'), 'w') as f: f.write('fake model weight') - with open(os.path.join(stage_dir, 'config.py'), 'w') as f: + with open(os.path.join(models_dir, 'config.py'), 'w') as f: f.write('fake model config file') rw.write_model_stage(stage_name='epoch20', files=['epoch20.pt', 'config.py'], mAP=expected_mAP) @@ -106,8 +101,8 @@ def _run_training(cfg: edict) -> None: def _run_mining(cfg: edict) -> None: # use `cfg.param` to get config file for training # pretrained models in `cfg.ymir.input.models_dir` - gpu_id: str = cfg.param.get(key='gpu_id') - class_names: List[str] = cfg.param.get(key='class_names') + gpu_id: str = cfg.param.get('gpu_id') + class_names: List[str] = cfg.param.get('class_names') idle_seconds: float = cfg.param.get('idle_seconds', 60) trigger_crash: bool = cfg.param.get('trigger_crash', False) # use `logging` or `print` to write log to console diff --git a/det-demo-tmi/img-man/infer-template.yaml b/det-demo-tmi/img-man/infer-template.yaml index b3d45dd..f360cff 100644 --- a/det-demo-tmi/img-man/infer-template.yaml +++ b/det-demo-tmi/img-man/infer-template.yaml @@ -2,10 +2,10 @@ # after build image, it should at /img-man/infer-template.yaml # key: gpu_id, task_id, model_params_path, class_names should be preserved -gpu_id: '0' -task_id: 'default-infer-task' -model_params_path: [] -class_names: [] +# gpu_id: '0' +# task_id: 'default-infer-task' +# model_params_path: [] +# class_names: [] # just for test, remove this key in your own docker image idle_seconds: 3 # 
idle seconds for each task diff --git a/det-demo-tmi/img-man/manifest.yaml b/det-demo-tmi/img-man/manifest.yaml new file mode 100644 index 0000000..73c21d2 --- /dev/null +++ b/det-demo-tmi/img-man/manifest.yaml @@ -0,0 +1,2 @@ +# object_type: 2 if this docker image is training, mining or infer for detection, 3 for semantic segmentation, default: 2 +"object_type": 2 diff --git a/det-demo-tmi/img-man/mining-template.yaml b/det-demo-tmi/img-man/mining-template.yaml index 5927eca..3e4b3ae 100644 --- a/det-demo-tmi/img-man/mining-template.yaml +++ b/det-demo-tmi/img-man/mining-template.yaml @@ -2,10 +2,10 @@ # after build image, it should at /img-man/mining-template.yaml # key: gpu_id, task_id, model_params_path, class_names should be preserved -gpu_id: '0' -task_id: 'default-mining-task' -model_params_path: [] -class_names: [] +# gpu_id: '0' +# task_id: 'default-mining-task' +# model_params_path: [] +# class_names: [] # just for test, remove this key in your own docker image idle_seconds: 6 # idle seconds for each task diff --git a/det-demo-tmi/img-man/training-template.yaml b/det-demo-tmi/img-man/training-template.yaml index f114648..f72c2b5 100644 --- a/det-demo-tmi/img-man/training-template.yaml +++ b/det-demo-tmi/img-man/training-template.yaml @@ -2,10 +2,10 @@ # after build image, it should at /img-man/training-template.yaml # key: gpu_id, task_id, pretrained_model_paths, class_names should be preserved -gpu_id: '0' -task_id: 'default-training-task' -pretrained_model_params: [] -class_names: [] +# gpu_id: '0' +# task_id: 'default-training-task' +# pretrained_model_params: [] +# class_names: [] export_format: 'det-voc:raw' # just for test, remove this key in your own docker image diff --git a/det-demo-tmi/requirements.txt b/det-demo-tmi/requirements.txt index 20103d3..6719696 100644 --- a/det-demo-tmi/requirements.txt +++ b/det-demo-tmi/requirements.txt @@ -1,4 +1,5 @@ pydantic>=1.8.2 pyyaml>=5.4.1 tensorboardX>=2.4 -#ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0 +packaging>=23.0 +ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0 diff --git a/det-mmdetection-tmi/ymir_infer.py b/det-mmdetection-tmi/ymir_infer.py index 3458497..62817ad 100644 --- a/det-mmdetection-tmi/ymir_infer.py +++ b/det-mmdetection-tmi/ymir_infer.py @@ -13,7 +13,8 @@ from mmcv.runner import init_dist from tqdm import tqdm from ymir_exc import result_writer as rw -from ymir_exc.util import (YmirStage, get_merged_config, write_ymir_monitor_process) +from ymir_exc.util import (YmirStage, get_merged_config, + write_ymir_monitor_process) from mmdet.apis import inference_detector, init_detector from mmdet.apis.test import collect_results_gpu @@ -59,9 +60,9 @@ def mmdet_result_to_ymir(results: List[Any], class_names: List[str]) -> List[rw. 
def get_config_file(cfg):
     if cfg.ymir.run_training:
-        model_params_path: List = cfg.param.get('pretrained_model_params', [])
+        model_params_path: List = cfg.param.get('pretrained_model_params', [])  # type: ignore
     else:
-        model_params_path: List = cfg.param.get('model_params_path', [])
+        model_params_path: List = cfg.param.get('model_params_path', [])  # type: ignore
 
     model_dir = cfg.ymir.input.models_dir
     config_files = [
@@ -134,7 +135,7 @@ def main():
 
         infer_result_list.append((asset_path, [ann for ann in raw_anns if ann.score >= conf_threshold]))
 
-        if idx % monitor_gap == 0:
+        if idx % monitor_gap == 0 and RANK in [0, -1]:
             write_ymir_monitor_process(cfg, task='infer', naive_stage_percent=idx / N, stage=YmirStage.TASK)
 
     if WORLD_SIZE > 1:
diff --git a/docs/cn/docker_images/det-yolov5-automl-tmi.md b/docs/cn/docker_images/det-yolov5-automl-tmi.md
new file mode 100644
index 0000000..20ac9c4
--- /dev/null
+++ b/docs/cn/docker_images/det-yolov5-automl-tmi.md
@@ -0,0 +1,37 @@
+# yolov5 automl 镜像说明文档
+
+## 仓库地址
+
+> 参考[ultralytics/yolov5](https://github.com/ultralytics/yolov5)
+- [modelai/ymir-yolov5](https://github.com/modelai/ymir-yolov5/tree/ymir-automl)
+
+## 镜像地址
+
+```
+youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu102-tmi
+```
+
+## 性能表现
+
+|Model |size&lt;br&gt;(pixels) |mAPval&lt;br&gt;0.5:0.95 |mAPval&lt;br&gt;0.5 |Speed&lt;br&gt;CPU b1&lt;br&gt;(ms) |Speed&lt;br&gt;V100 b1&lt;br&gt;(ms) |Speed&lt;br&gt;V100 b32&lt;br&gt;(ms) |params&lt;br&gt;(M) |FLOPs&lt;br&gt;@640 (B)
+|--- |--- |--- |--- |--- |--- |--- |--- |---
+|[YOLOv5n] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
+|[YOLOv5s] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
+|[YOLOv5m] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
+|[YOLOv5l] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
+|[YOLOv5x] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
+| | | | | | | | |
+|[YOLOv5n6] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
+|[YOLOv5s6] |1280 |44.8 |63.7 |385 |8.2 |3.6 |16.8 |12.6
+|[YOLOv5m6] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
+|[YOLOv5l6] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
+
+## 训练/推理/挖掘参数
+
+| 超参数 | 默认值 | 类型 | 说明 | 建议 |
+| - | - | - | - | - |
+| hyper-parameter | default value | type | note | advice |
+| fast | true | 布尔型 | True表示要求速度快 | True, true, False, false 大小写均支持 |
+| accurate | true | 布尔型 | True表示要求精度高 | True, true, False, false 大小写均支持 |

From 377f9a10071fbdb47899882e16954cf2b22e3fab Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 11 Jan 2023 18:48:17 +0800
Subject: [PATCH 182/204] add read the docs

---
 .gitignore                         |  1 +
 .readthedocs.yaml                  | 20 +++++++++
 README.MD => README.md             |  0
 README_zh-CN.MD => README_zh-CN.md |  0
 docs/index.md                      |  1 +
 docs/requirements.in               |  3 ++
 docs/requirements.txt              | 66 ++++++++++++++++++++++++++++++
 mkdocs.yml                         | 18 ++++++++
 pyproject.toml                     |  8 ++++
 9 files changed, 117 insertions(+)
 create mode 100644 .readthedocs.yaml
 rename README.MD => README.md (100%)
 rename README_zh-CN.MD => README_zh-CN.md (100%)
 create mode 100644 docs/index.md
 create mode 100644 docs/requirements.in
 create mode 100644 docs/requirements.txt
 create mode 100644 mkdocs.yml
 create mode 100644 pyproject.toml

diff --git a/.gitignore b/.gitignore
index 2c245d8..65d1591 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,4 @@ yolov4_training/build_docker.sh
 yolov4_training/dockerfile_tmp
 yolov4_training/yolov4.conv.137
 det-demo-tmi/voc_dog
+site/
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000..e2645f9
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,20 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the version of Python and other tools you might need
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+
+mkdocs:
+  configuration: mkdocs.yml
+
+# Optionally declare the Python requirements required to build your docs
+python:
+  install:
+    - requirements: docs/requirements.txt
diff --git a/README.MD b/README.md
similarity index 100%
rename from README.MD
rename to README.md
diff --git a/README_zh-CN.MD b/README_zh-CN.md
similarity index 100%
rename from README_zh-CN.MD
rename to README_zh-CN.md
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..4e6ae29
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1 @@
+{!docs/README.MD!}
diff --git a/docs/requirements.in b/docs/requirements.in
new file mode 100644
index 0000000..bec300c
--- /dev/null
+++ b/docs/requirements.in
@@ -0,0 +1,3 @@
+mkdocs
+mkdocstrings[python]
+markdown-include
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..f6cb652
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,66 @@
+#
+# This file is autogenerated by pip-compile with python 3.10
+# To update, run:
+#
+#    pip-compile docs/requirements.in
+#
+click==8.1.3
+    # via mkdocs
+ghp-import==2.1.0
+    # via mkdocs
+griffe==0.22.0
+    # via mkdocstrings-python
+importlib-metadata==4.12.0
+    # via mkdocs
+jinja2==3.1.2
+    # via
+    #   mkdocs
+    #   mkdocstrings
+markdown==3.3.7
+    # via
+    #   markdown-include
+    #   mkdocs
+    #   mkdocs-autorefs
+    #   mkdocstrings
+    #   pymdown-extensions
+markdown-include==0.6.0
+    # via -r docs/requirements.in
+markupsafe==2.1.1
+    # via
+    #   jinja2
+    #   mkdocstrings
+mergedeep==1.3.4
+    # via mkdocs
+mkdocs==1.3.0
+    # via
+    #   -r docs/requirements.in
+    #   mkdocs-autorefs
+    #   mkdocstrings
+mkdocs-autorefs==0.4.1
+    # via mkdocstrings
+mkdocstrings[python]==0.19.0
+    # via
+    #   -r docs/requirements.in
+    #   mkdocstrings-python
+mkdocstrings-python==0.7.1
+    # via mkdocstrings
+packaging==21.3
+    # via mkdocs
+pymdown-extensions==9.5
+    # via mkdocstrings
+pyparsing==3.0.9
+    # via packaging
+python-dateutil==2.8.2
+    # via ghp-import
+pyyaml==6.0
+    # via
+    #   mkdocs
+    #   pyyaml-env-tag
+pyyaml-env-tag==0.1
+    # via mkdocs
+six==1.16.0
+    # via python-dateutil
+watchdog==2.1.9
+    # via mkdocs
+zipp==3.8.0
+    # via importlib-metadata
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..11b5000
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,18 @@
+site_name: Ymir-Executor Documentation
+theme:
+  name: readthedocs
+  highlightjs: true
+plugins:
+  - search
+  - mkdocstrings:
+      handlers:
+        # See: https://mkdocstrings.github.io/python/usage/
+        python:
+          options:
+            docstring_style: sphinx
+markdown_extensions:
+  - markdown_include.include:
+      base_path: .
+  - admonition
+nav:
+  - Home: index.md
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..14a2dda
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,8 @@
+[build-system]
+requires = ["flit_core >=3.2,<4"]
+build-backend = "flit_core.buildapi"
+
+[project]
+name = "lumache"
+authors = [{name = "Graziella", email = "graziella@lumache"}]
+dynamic = ["version", "description"]

From 2fbcfa8636ac34a195e0fc169c7098b6be757c7e Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Wed, 11 Jan 2023 19:05:24 +0800
Subject: [PATCH 183/204] first read the docs

---
 mkdocs.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mkdocs.yml b/mkdocs.yml
index 11b5000..f2f223e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -14,5 +14,5 @@ markdown_extensions:
   - markdown_include.include:
       base_path: .
- admonition -nav: - - Home: index.md +# nav: +# - Home: index.md From 1d1527fdb54b048d2b265a838c8e015f5cbb1275 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 13 Jan 2023 16:47:53 +0800 Subject: [PATCH 184/204] add mkdocs --- docs/README.MD | 62 +++++- docs/imgs/training-hyper-parameter-web.png | Bin 0 -> 19402 bytes docs/object_detection/simple_det_infer.md | 0 docs/object_detection/simple_det_mining.md | 0 docs/object_detection/simple_det_training.md | 0 docs/overview/dataset-format.md | 83 +++++++ docs/overview/framework.md | 63 ++++++ docs/overview/hyper-parameter.md | 203 ++++++++++++++++++ docs/overview/ymir-executor.md | 145 +++++++++++++ docs/sample_files/in_config.md | 20 ++ docs/sample_files/in_env.md | 27 +++ .../simple_semantic_seg_infer.md | 86 ++++++++ .../simple_semantic_seg_mining.md | 86 ++++++++ .../simple_semantic_seg_training.md | 93 ++++++++ .../test_semantic_seg.md | 80 +++++++ docs/ymir-dataset-zh-CN.md | 10 +- mkdocs.yml | 7 +- seg-semantic-demo-tmi/Dockerfile | 42 ++++ .../app/pycococreatortools.py | 143 ++++++++++++ seg-semantic-demo-tmi/app/result_to_coco.py | 105 +++++++++ seg-semantic-demo-tmi/app/start.py | 72 ++++--- seg-semantic-demo-tmi/fast.Dockerfile | 19 ++ seg-semantic-demo-tmi/requirements.txt | 9 +- 23 files changed, 1310 insertions(+), 45 deletions(-) create mode 100644 docs/imgs/training-hyper-parameter-web.png create mode 100644 docs/object_detection/simple_det_infer.md create mode 100644 docs/object_detection/simple_det_mining.md create mode 100644 docs/object_detection/simple_det_training.md create mode 100644 docs/overview/dataset-format.md create mode 100644 docs/overview/framework.md create mode 100644 docs/overview/hyper-parameter.md create mode 100644 docs/overview/ymir-executor.md create mode 100644 docs/sample_files/in_config.md create mode 100644 docs/sample_files/in_env.md create mode 100644 docs/semantic_segmentation/simple_semantic_seg_infer.md create mode 100644 docs/semantic_segmentation/simple_semantic_seg_mining.md create mode 100644 docs/semantic_segmentation/simple_semantic_seg_training.md create mode 100644 docs/semantic_segmentation/test_semantic_seg.md create mode 100644 seg-semantic-demo-tmi/app/pycococreatortools.py create mode 100644 seg-semantic-demo-tmi/app/result_to_coco.py create mode 100644 seg-semantic-demo-tmi/fast.Dockerfile diff --git a/docs/README.MD b/docs/README.MD index 0d71500..9c2c826 100644 --- a/docs/README.MD +++ b/docs/README.MD @@ -1,16 +1,60 @@ -# ymir-executor 文档 +# ymir镜像文档 -## 下载使用 +## 简介 -- [已有的镜像](./official-docker-image.md) +- [ymir简介](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E) -- [挖掘算法评测](./mining-images-overview.md) 完善中。。。 +- ymir镜像: 为ymir提供模型训练,推理,挖掘服务的镜像 + +## 快速使用 + +- [安装ymir](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#2-%E5%AE%89%E8%A3%85) + +- [ymir操作说明](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E) + +- [镜像社区](http://pubimg.vesionbook.com:8110/img) 下载、发布ymir镜像 + +- [历史镜像](./official-docker-image.md) + +- [挖掘算法评测](./mining-images-overview.md) + +## Ymir镜像说明 + +- [Ymir镜像整体流程](./overview/framework.md) + +- [Ymir镜像数据集格式](./overview/dataset-format.md) + +- [Ymir镜像超参数](./overview/hyper-parameter.md) + +## 目标检测 + +- [制作一个简单的检测训练镜像]() + +- [制作一个简单的检测推理镜像]() + +- [制作一个简单的检测挖掘镜像]() + +## 语义分割 + +- [制作一个简单的语义分割训练镜像]() + +- [制作一个简单的语义分割推理镜像]() + +- [制作一个简单的语义分割挖掘镜像]() + +## 实例分割 + +- [制作一个简单的实例分割训练镜像]() + +- [制作一个简单的实例分割推理镜像]() + +- [制作一个简单的实例分割挖掘镜像]() ## 从零定制 -1. 
[制作一个简单的镜像](../det-demo-tmi/README.md)
+- [制作一个简单的多功能检测镜像](../det-demo-tmi/README.md)
 
-2. [了解ymir接口与数据结构](./ymir-dataset-zh-CN.md)
+- [了解ymir接口与数据结构](./ymir-dataset-zh-CN.md)
 
 ## 基于已有镜像进行定制
@@ -26,7 +70,7 @@
 
 - [通过ymir-executor-verifier进行镜像校验](https://github.com/modelai/ymir-executor-verifier)
 
-## 其它
+## 生态环境
 
 - [ymir镜像开发SDK](https://github.com/modelai/ymir-executor-sdk)
@@ -44,8 +88,12 @@
 
 - 批量校验镜像
 
+## FAQ
+
 - [FAQ](./FAQ.md)
 
+## 其它
+
 - [ymir版本与接口兼容](./ymir-executor-version.md)
 
 - [加速apt/pip/docker](./speedup_apt_pip_docker.md)
diff --git a/docs/imgs/training-hyper-parameter-web.png b/docs/imgs/training-hyper-parameter-web.png
new file mode 100644
index 0000000000000000000000000000000000000000..f59945399bd660874f1efa0a1e8d06104c03bf49
GIT binary patch
literal 19402
[此处省略 19402 字节的 base85 图片编码数据]
diff --git a/docs/overview/dataset-format.md b/docs/overview/dataset-format.md
new file mode 100644
[...]
+### 训练任务
+
+- 训练数据集信息存储在索引文件`/in/train-index.tsv`(训练集)与`/in/val-index.tsv`(验证集)中,其中每行的格式为`<图像文件绝对路径>\t<标注文件绝对路径>`
+
+```
+<图像文件1绝对路径> <标注文件1绝对路径>
+<图像文件2绝对路径> <标注文件2绝对路径>
+<图像文件3绝对路径> <标注文件3绝对路径>
+```
+
+### 推理任务与挖掘任务
+
+- 推理任务与挖掘任务的数据集格式相同
+
+- 推理或挖掘数据集信息存储在索引文件`/in/candidate-index.tsv`中,其中每行的格式为`<图像文件绝对路径>`
+
+```
+<图像文件1绝对路径>
+<图像文件2绝对路径>
+<图像文件3绝对路径>
+```
+
+## det-ark:raw
+
+也可写为 ark:raw, 为目标检测格式
+
+- export_format = det-ark:raw 时的训练/验证集索引文件
+
+```
+/in/assets/02/1c5c432085dc136f6920f901792d357d4266df02.jpg /in/annotations/02/1c5c432085dc136f6920f901792d357d4266df02.txt
+/in/assets/95/e47ac9932cdf6fb08681f6b0007cbdeefdf49c95.jpg /in/annotations/95/e47ac9932cdf6fb08681f6b0007cbdeefdf49c95.txt
+/in/assets/56/56f3af57d381154d377ad92a99b53e4d12de6456.jpg /in/annotations/56/56f3af57d381154d377ad92a99b53e4d12de6456.txt
+```
+
+- txt文件每行的格式为 `class_id, xmin, ymin, xmax, ymax, ann_quality, bbox_angle`
+
+其中 `class_id, xmin, ymin, xmax, ymax` 均为整数,而标注质量`ann_quality`为浮点数,默认为-1.0, 标注框旋转角度`bbox_angle`为浮点数,单位为[RAD](https://baike.baidu.com/item/RAD/2262445)
+```
+0, 242, 61, 424, 249, -1.0, 0.0
+```
+
+## det-voc:raw
+
+也可写为 voc:raw, 为目标检测格式
+
+- export_format = det-voc:raw 时的训练/验证集索引文件
+
+```
+/in/assets/02/1c5c432085dc136f6920f901792d357d4266df02.jpg /in/annotations/02/1c5c432085dc136f6920f901792d357d4266df02.xml
+/in/assets/95/e47ac9932cdf6fb08681f6b0007cbdeefdf49c95.jpg /in/annotations/95/e47ac9932cdf6fb08681f6b0007cbdeefdf49c95.xml
+/in/assets/56/56f3af57d381154d377ad92a99b53e4d12de6456.jpg /in/annotations/56/56f3af57d381154d377ad92a99b53e4d12de6456.xml
+```
+
+## seg-coco:raw
+
+语义与实例分割的标注格式
+
+- export_format = seg-coco:raw 时的训练/验证集索引文件
+
+!!! 注意
+    此时所有图像文件共享一个标注文件
+    此时训练集与验证集共享一个标注文件
+    语义与实例分割标注中不包含背景类,即只提供项目标签的标注mask
+
+```
+/in/assets/02/1c5c432085dc136f6920f901792d357d4266df02.jpg /in/annotations/coco-annotations.json
+/in/assets/95/e47ac9932cdf6fb08681f6b0007cbdeefdf49c95.jpg /in/annotations/coco-annotations.json
+/in/assets/56/56f3af57d381154d377ad92a99b53e4d12de6456.jpg /in/annotations/coco-annotations.json
+```
diff --git a/docs/overview/framework.md b/docs/overview/framework.md
new file mode 100644
index 0000000..3bfcc9e
--- /dev/null
+++ b/docs/overview/framework.md
@@ -0,0 +1,63 @@
+# ymir镜像整体流程
+
+- 从数据的角度看,ymir平台实现了数据的导入、划分、合并与标注等功能;镜像则提供代码与环境依赖,利用数据训练模型,对数据进行推理或挖掘出最有标注价值的数据。
+
+- 从镜像的角度看,ymir平台提供数据集、任务与超参数信息,镜像处理后产生结果文件,ymir对结果文件进行解析,并显示在ymir平台上。
+
+- 从接口的角度看,约定好ymir平台提供的数据与超参数格式,以及镜像产生的结果文件格式,则可以提供多种镜像,实现不同的算法功能并对接到ymir平台。
+
+## ymir镜像使用
+
+- [模型训练](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%A8%A1%E5%9E%8B%E8%AE%AD%E7%BB%83)
+
+- [模型推理](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%A8%A1%E5%9E%8B%E6%8E%A8%E7%90%86)
+
+- [数据挖掘](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98)
+
+## ymir镜像
+
+> 将ymir镜像视为一个对象或黑盒,它有以下属性
+
+- 镜像类型:按镜像提供的功能,可以将镜像分类为训练镜像,推理镜像及挖掘镜像。一个镜像可以同时为训练,推理及挖掘镜像,也可以仅支持一种或两种功能。
+
+  - ymir平台基于镜像或数据集,可以发起训练,推理及挖掘任务,任务信息提供到选择的镜像,启动对应的代码实现对应功能。如发起训练任务,将启动镜像中对应的训练代码;发起推理任务,将启动镜像中对应的推理代码。目前ymir平台支持发起单一任务,也支持发起推理及挖掘的联合任务。
+
+- 镜像地址:来自[docker](https://www.runoob.com/docker/docker-tutorial.html)的概念,即镜像的仓库源加标签,一般采用<仓库源>:<标签>的格式,如 `ubuntu:22.04`, `youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi`。
+  - 对于公开的镜像,仓库源对应docker hub上的镜像仓库,如 [youdaoyzbx/ymir-executor](https://hub.docker.com/r/youdaoyzbx/ymir-executor/tags), [pytorch/pytorch](https://hub.docker.com/r/pytorch/pytorch/tags)
+
+- 镜像名称:用户自定义的镜像名称,注意名称长度,最多50个字符
+
+- 关联镜像:对于单一功能的镜像,训练镜像产生的模型,其它镜像不一定能使用。如基于[yolov4](https://github.com/AlexeyAB/darknet)训练得到的模型权重,基于[yolov7](https://github.com/WongKinYiu/yolov7)的推理镜像无法加载,因此需要对此类镜像进行关联,推荐使用多功能镜像。
+
+- 镜像功能参数:为提高镜像的灵活性,用户可以在ymir平台上修改镜像的默认功能参数。如 `epochs`, `batch_size_per_gpu`,控制训练镜像的训练时长及显存占用。注意ymir平台为所有镜像提供额外的[通用参数](./hyper-parameter.md)
+
+  - 训练镜像功能参数:对应训练超参数,常见的有`epochs`, `batch_size_per_gpu`, `num_workers_per_gpu`。默认训练参数配置文件存放在镜像的`/img-man/training-template.yaml`
+
+  - 推理镜像功能参数:常见的有`confidence_threshold`,设置推理置信度。默认推理参数配置文件存放在镜像的`/img-man/infer-template.yaml`
+
+  - 挖掘镜像功能参数:常见的有`confidence_threshold`设置推理置信度, `mining_algorithm`设置挖掘算法。默认挖掘参数配置文件存放在镜像的`/img-man/mining-template.yaml`
+
+- 镜像目标:根据镜像中算法的类型,将镜像分为目标检测镜像、语义分割镜像及实例分割镜像等。
+
+  - 镜像目标定义在镜像的 `/img-man/manifest.yaml` 文件中,如此文件不存在,ymir则默认镜像为目标检测镜像。
+
+- 添加镜像:添加镜像时需要管理员权限,ymir平台首先会通过 `docker pull` 下载镜像,再解析镜像的`/img-man`目录,确定镜像中算法的类型及镜像支持的功能。
+
+## ymir平台与镜像之间的接口
+
+> 从镜像的角度看,ymir平台将任务信息,数据集信息,超参数信息放在镜像的`/in`目录,而镜像输出的进度信息,结果文件放在镜像的`/out`目录。
+
+- 任务信息:任务信息包含要执行的是训练,推理还是挖掘任务,以及任务id。参考镜像文件[/in/env.yaml](../sample_files/in_env.md)
+
+- 
[数据集信息](./dataset-format.md):ymir平台中所有的数据集存放在相同的目录下,其中图片以其hash码命名,以避免图片的重复。ymir平台为镜像提供索引文件,索引文件的每一行包含图像绝对路径及对应标注绝对路径。 + + - 对于训练任务,标注的格式由超参数 [export-format](./hyper-parameter.md) 决定。 + + - 对于推理及挖掘任务,索引文件仅包含图像绝对路径。 + + - 参考镜像文件 [/in/env.yaml](../sample_files/in_config.md) + +- [超参数信息](./hyper-parameter.md) + +- [ymir平台接口文档](https://github.com/IndustryEssentials/ymir/blob/master/dev_docs/ymir-cmd-container.md) diff --git a/docs/overview/hyper-parameter.md b/docs/overview/hyper-parameter.md new file mode 100644 index 0000000..1dc4db0 --- /dev/null +++ b/docs/overview/hyper-parameter.md @@ -0,0 +1,203 @@ +# Ymir镜像超参数 + +- ymir平台为每个镜像提供通用的参数,同时每个镜像按任务拥有相应的训练、推理及挖掘功能参数。 + +- 部分通用参数由ymir平台自动生成,剩余通用参数可以手动修改。 + +- 默认功能参数由镜像提供,用户可以手动修改功能参数。 + +- 从镜像的角度,通用参数与功能参数均以 [yaml格式](https://www.runoob.com/w3cnote/yaml-intro.html) 存储在镜像中的 `/in/config.yaml` + +{!docs/sample_files/in_config.md!} + +## ymir平台的通用参数 + +- gpu_count: 用户可在启动任务中进行修改 + +- gpu_id: ymir平台根据gpu_count自动生成 + +- task_id: ymir平台自动生成 + +- class_names: ymir平台根据用户选择自动生成 + +- shm_size: 用户可在`超参数配置`页面中手动修改 + +- export_format: 用户可在`超参数配置`页面中手动修改 + +- pretrained_model_params: ymir平台根据用户选择自动生成 + +- model_params_path: ymir平台根据用户选择自动生成 + +### gpu_count + +ymir平台为镜像提供的显卡数量 + +``` +gpu_count: 0 # 表示不使用显卡,即仅使用cpu +gpu_count: 2 # 表示使用 2 块显卡 +``` + +### gpu_id + +ymir 平台为镜像提供的gpu编号,编号从0开始,但实际上使用的显卡为当前空闲显存超过80%的随机显卡。 + +``` +gpu_id: '0' # 采用一块显卡,实际上可能使用编号为5, 6的显卡。 +gpu_id: '0, 1' # 采用两块显卡,实际上可能使用编号为1和8的显卡,或者使用编号为3和5的显卡。 +``` + +!!! 注意 + 对于镜像而言,直接使用 `gpu_id` 对应的显卡即可,不需要考虑 `CUDA_VISIBLE_DEVICES`等变量。 ymir平台在启动镜像时通过`--gpus '"device=5,7"'`指定使用编号为5, 7的显卡,但实际镜像中只能使用编号 `0, 1`,其效果如下。 + +``` +> docker run --gpus '"device=5,7"' nvidia/cuda:10.1-cudnn7-devel-ubuntu16.04 nvidia-smi +Thu Jan 12 06:19:03 2023 ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 465.31 Driver Version: 465.31 CUDA Version: 11.3 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|===============================+======================+======================| +| 0 NVIDIA GeForce ... On | 00000000:86:00.0 Off | N/A | +| 22% 31C P8 1W / 250W | 0MiB / 11019MiB | 0% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ +| 1 NVIDIA GeForce ... On | 00000000:8A:00.0 Off | N/A | +| 22% 29C P8 5W / 250W | 0MiB / 11019MiB | 0% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ +``` + +### task_id + +任务 id, 可以唯一确定某项任务, 如 `t000000100000208ac7a1664337925` + +### class_names + +数据集的类别名称 + +``` +class_names: ['cat', 'dog'] # 任务目标中包含cat和dog +class_names: ['cat'] # 任务目标中仅包含cat +``` + +### shm_size + +ymir平台为镜像提供的共享内存大小,对于ymir2.0.0后的版本,默认的共享内存为 `16G` 乘以 `gpu_count`。而ymir2.0.0之前的版本,默认的共享内存固定为 `16G`。 + +``` +shm_size: 128G # 为镜像提供128G共享内存 +shm_size: 256G # 为镜像提供256G共享内存 +``` + +!!! 
注意 + 共享内存过小时,会报 `Out of Memory`的错误,即内存不足错误。可以考虑减少`gpu_count`,`batch size`, `num_workers` 或 增加 `shm_size`。服务器的共享内存可以通过 `df -h` 查看,下面服务器的共享内存为 `63G` + +``` +> df -h | grep shm +Filesystem Size Used Avail Use% Mounted on +tmpfs 63G 0 63G 0% /dev/shm +``` + +### export_format + +ymir平台为训练任务导出的图像及标注格式, 详情参考 [数据集格式](./dataset-format.md) + +- 图像格式:`['raw']`, `raw` 代表常用的图片存储格式,如 `jpg`。 + +- 标注格式:`["ark", "voc", "det-ark", "det-voc", "seg-coco"]`, 其中 `ark` 与 `det-ark` 为同一种目标检测格式,标注文件为txt文件;`voc`与`det-voc`为同一种目标检测格式,标注文件为xml文件;`seg-coco`为语义分割与实例分割的格式,标注文件为[coco格式](https://cocodataset.org/#format-data)的json文件。 + +``` +export_format: ark:raw # 类似 yolov5 的目标检测格式, 标注文件为txt格式 +export_format: det-voc:raw # 类似 voc 目标检测格式,标注文件为xml格式 +export_format: seg-coco:raw # 类似 coco 的语义分割或实例分割的格式,标注文件为json格式 +``` + +!!! 注意 + 仅对训练任务起作用,对于推理或挖掘任务,此参数不起作用 + +### pretrained_model_params + +- ymir平台为训练任务对应镜像提供的参数,其中包含预训练文件的绝对路径。对应[训练配置](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E8%AE%AD%E7%BB%83%E9%85%8D%E7%BD%AE)中的预训练模型。 +- 预训练模型文件对应训练任务的输出文件,可以包含任意文件,如配置文件等,不局限于权重文件。 + +``` +pretrained_model_params: ['/in/models/a.pth', '/in/models/b.pth', '/in/models/a.py'] +``` + +!!! 注意 + 对于推理或挖掘任务,此参数不提供 + + +### model_params_path + +- ymir平台为推理或挖掘任务对应镜像提供的参数,其中包含权重文件的绝对路径,对应[模型推理](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%A8%A1%E5%9E%8B%E6%8E%A8%E7%90%86) 或 [数据挖掘](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98) 中选择的模型。 + +``` +model_params_path: ['/in/models/a.pth', '/in/models/b.pth', '/in/models/a.py'] +``` + +!!! 注意 + 对于训练任务,此参数不提供 + +## 任务超参数 + +### 训练任务超参数 + +镜像可以通过 `/img-man/training-template.yaml` 向ymir平台暴露训练任务的超参数, 以`youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi`镜像为例,它的训练任务超参数配置文件如下: + +- 训练任务超参数配置文件: 镜像中的 `/img-man/training-template.yaml` + +``` +shm_size: '128G' +export_format: 'ark:raw' +model: 'yolov5s' +batch_size_per_gpu: 16 +num_workers_per_gpu: 4 +epochs: 100 +img_size: 640 +opset: 11 +args_options: '--exist-ok' +save_best_only: True # save the best weight file only +save_period: 10 +sync_bn: False # work for multi-gpu only +activation: 'SiLU' # view https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity +``` + +- ymir平台对应的超参数编辑页面, 编辑页面与配置文件一一对应。 + +![](../imgs/training-hyper-parameter-web.png) + +### 推理任务超参数 + +- 推理任务超参数配置文件: 镜像中的 `/img-man/infer-template.yaml` + + +### 挖掘任务超参数 + +- 挖掘任务超参数配置文件: 镜像中的 `/img-man/mining-template.yaml` + +### 常用任务超参数 + +- epochs: 整数,如 100, 表示在训练任务中,整个数据集循环的次数。epochs 越大,数据集越大,训练时间越长。 + + - 类似的参数有 `max_epochs`, `num_epochs`,表达的意思相同。 + +- steps: 整数,如20000,表示训练任务中,训练步骤循环的次数。steps越大,训练时间越长。 + + - 类似的参数有 `max_steps`, `num_steps`, `iters`, `max_iters`, `num_iters` + + - `steps = epochs * dataset_size / batch_size` + +- batch_size: 整数,批量大小,如 8。由于数据集往往上万张,计算机无法一次性全部加载到内存或显存中,因此在处理时,可以一次处理 8 张。 + + - 类似的参数有 `batch`, `num_batch`。 + + - 对于支持分布式处理的镜像, `batch_size_per_gpu` 与 `num_images_per_gpu` 乘以 使用的GPU数(gpu_count),则为实际的 batch_size。 + +- num_workers: 整数,数据加载时使用的进程数,设置为0则是采用单进程进行加载,一般设置为4 或 8。 + + - 类似的参数有: `workers` + + - 对于支持分布式处理的镜像, `num_workers_per_gpu` 乘以使用的GPU数(gpu_count), 则为实际的 num_workers。 diff --git a/docs/overview/ymir-executor.md b/docs/overview/ymir-executor.md new file mode 100644 index 0000000..f34d6af --- /dev/null +++ b/docs/overview/ymir-executor.md @@ -0,0 +1,145 @@ +# ymir镜像制作简介 + +## 背景知识 + +- [python3](https://www.runoob.com/python3/python3-tutorial.html) 
ymir平台,深度学习框架,开源算法库主要以python3进行开发 + +- [docker](https://www.runoob.com/docker/docker-tutorial.html) 制作ymir镜像,需要了解docker 及 [dockerfile](https://www.runoob.com/docker/docker-dockerfile.html) + +- [linux](https://www.runoob.com/linux/linux-shell.html) ymir镜像主要基于linux系统,需要了解linux 及 [linux-shell](https://www.runoob.com/linux/linux-shell.html) + +- [深度视觉算法] ymir镜像的核心算法是深度视觉算法,需要了解[深度学习](https://leonardoaraujosantos.gitbook.io/artificial-inteligence/machine_learning/deep_learning), 计算机视觉。 + +- [深度学习框架] 应用深度学习算法离不开深度学习框架如 [pytorch](https://pytorch.org/), [tensorflow](https://tensorflow.google.cn/?hl=en) 与 [keras](https://keras.io/) 等的支持。熟悉其中的一种即可,推荐pytorch. + +- [深度学习算法库] 基于已有的算法库应用前沿算法或开发新算法是常规操作,推荐了解 [mmdetection](https://github.com/open-mmlab/mmdetection) 与 [yolov5](https://github.com/ultralytics/yolov5) + + +## 环境依赖 + +假设拥有一台带nvidia显卡的linux服务器, 以ubuntu16.04 为例 + +!!! 注意 + 如果apt update 或 apt install 速度缓慢,可以考虑更换软件源 + [清华软件源](https://mirrors.tuna.tsinghua.edu.cn/help/ubuntu/) + [中科大软件源](http://mirrors.ustc.edu.cn/help/ubuntu.html) + +- [docker](https://www.runoob.com/docker/ubuntu-docker-install.html) +``` +# 安装 +curl -sSL https://get.daocloud.io/docker | sh + +# 测试 +sudo docker run hello-world + +# 添加普通用户执行权限 +sudo usermod -aG docker $USER + +# 重新login后测试 +docker run hello-world +``` + +- [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installation-guide) + +!!! 注意 + 先按照上述链接中的前提条件安装好 **NVIDIA Driver** + +``` +# 添加软件源 +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ + && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ + && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list + +# 更新索引 +sudo apt-get update + +# 安装 +sudo apt-get install -y nvidia-docker2 + +# 重启docker +sudo systemctl restart docker + +# 测试 +docker run --rm --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi +``` + +## 制作一个hello world 镜像 + +### 编辑Dockerfile + +``` +# vim Dockerfile +# cat Dockerfile + +FROM ubuntu:18.04 # 基于ubuntu18.04镜像制作新镜像 + +CMD echo "hello ymir executor" # 新镜像在运行时默认执行的命令 +``` + +### 制作 hello-ymir:latest 镜像 + +``` +# docker build -t hello-ymir:latest -f Dockerfile . 
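+# (注释为编者补充: -t 指定<镜像名:标签>, -f 指定 Dockerfile 路径, 末尾的 `.` 为构建上下文目录)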
+
+Sending build context to Docker daemon  52.74kB
+Step 1/2 : FROM ubuntu:18.04
+18.04: Pulling from library/ubuntu
+a055bf07b5b0: Pull complete
+Digest: sha256:c1d0baf2425ecef88a2f0c3543ec43690dc16cc80d3c4e593bb95e4f45390e45
+Status: Downloaded newer image for ubuntu:18.04
+ ---> e28a50f651f9
+Step 2/2 : CMD echo "hello ymir executor"
+ ---> Running in 6dd391c7688d
+Removing intermediate container 6dd391c7688d
+ ---> 4c8672e6ce02
+Successfully built 4c8672e6ce02
+Successfully tagged hello-ymir:latest
+```
+
+### 测试
+
+```
+# docker run -it --rm hello-ymir
+
+hello ymir executor
+```
+
+## ymir 镜像制作
+
+### 基础镜像
+
+需要选择一个合适的基础镜像,上面的例子中我们采用ubuntu18.04作为基础镜像构建新镜像,基于实践,我们推荐制作ymir镜像的基础镜像包含以下配置:
+
+- python 版本 >= 3.8
+
+- 支持的cuda版本 >= 11.2
+
+- 推荐基于[nvidia/cuda](https://hub.docker.com/r/nvidia/cuda/tags) 与 [pytorch/pytorch](https://hub.docker.com/r/pytorch/pytorch/tags) 进行ymir镜像制作
+
+### 所有ymir镜像均需要实现的功能
+
+- 提供超参数模板文件: 必选,ymir平台需要解析镜像的 **/img-man** 目录生成超参数配置页面
+
+- 提供默认启动脚本:必选,推荐采用 **bash /usr/bin/start.sh** 作为镜像的默认启动脚本
+
+- 写进度: 必选, 将程序当前完成的百分比反馈到ymir平台,从而估计程序的剩余运行时间
+
+- 写结果文件:必选,将程序运行的结果反馈到ymir平台
+
+- 提供镜像说明文件:可选,ymir平台通过解析 **/img-man/manifest.yaml** 得到镜像的目标类型,即镜像支持目标检测,语义分割还是实例分割。默认目标类型为目标检测。
+
+### 训练镜像需要实现的额外功能
+
+- 基本功能:加载数据集与超参数进行训练,将模型权重,模型精度等结果保存到 **/out** 目录的指定文件。
+
+- 写tensorboard日志:可选, ymir平台支持查看训练任务的tensorboard训练日志
+
+### 推理镜像需要实现的额外功能
+
+- 基本功能:加载数据集与模型权重进行推理,将推理结果保存到 **/out** 目录的指定文件。
+
+### 挖掘镜像需要实现的额外功能
+
+- 基本功能:加载数据集与模型权重进行挖掘,基于主动学习算法获得每张图片的重要程度分数,将分数保存到 **/out** 目录的指定文件。
diff --git a/docs/sample_files/in_config.md b/docs/sample_files/in_config.md
new file mode 100644
index 0000000..8d767f5
--- /dev/null
+++ b/docs/sample_files/in_config.md
@@ -0,0 +1,20 @@
+```
+args_options: --exist-ok
+batch_size_per_gpu: 16
+class_names:
+- dog
+- cat
+- person
+epochs: 10
+export_format: ark:raw
+gpu_count: 4
+gpu_id: '0,1,2,3'
+img_size: 640
+model: yolov5s
+num_workers_per_gpu: 8
+opset: 11
+save_period: 10
+shm_size: 32G
+sync_bn: false
+task_id: t000000100000208ac7a1664337925
+```
diff --git a/docs/sample_files/in_env.md b/docs/sample_files/in_env.md
new file mode 100644
index 0000000..07e0a14
--- /dev/null
+++ b/docs/sample_files/in_env.md
@@ -0,0 +1,27 @@
+```
+input:
+  annotations_dir: /in/annotations # 标注文件存储目录
+  assets_dir: /in/assets # 图像文件存储目录
+  candidate_index_file: /in/candidate-index.tsv # 推理或挖掘任务中的数据集索引文件
+  config_file: /in/config.yaml # 超参数文件
+  models_dir: /in/models # 预训练模型文件存储目录
+  root_dir: /in # 输入信息根目录
+  training_index_file: /in/train-index.tsv # 训练任务中的训练数据集索引文件
+  val_index_file: /in/val-index.tsv # 训练任务中的验证数据集索引文件
+output:
+  infer_result_file: /out/infer-result.json # 推理任务结果文件
+  mining_result_file: /out/result.tsv # 挖掘任务结果文件
+  models_dir: /out/models # 训练任务权重文件输出目录
+  monitor_file: /out/monitor.txt # 进度记录文件
+  root_dir: /out # 输出信息根目录
+  tensorboard_dir: /out/tensorboard # 训练任务中tensorboard日志目录
+  training_result_file: /out/models/result.yaml # 训练任务的结果文件
+run_infer: false # 是否执行推理任务
+run_mining: true # 是否执行挖掘任务
+run_training: false # 是否执行训练任务
+protocol_version: 1.0.0 # ymir平台镜像接口版本
+task_id: t00000020000029d077c1662111056 # 任务id
+```
+
+!!! 注意
+    /in/env.yaml 中的所有路径均为绝对路径
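以下为编者补充的一个最小示意,串起上文“写进度”“写结果文件”与 /in/env.yaml 约定,仅使用文中已出现的 ymir_exc 接口;其中 `epoch10.pt`, `config.py` 等文件名为假设,实际以各镜像自身训练代码为准:

```python
import random

from ymir_exc import monitor
from ymir_exc import result_writer as rw
from ymir_exc.util import get_merged_config

# 合并 /in/env.yaml 与 /in/config.yaml 中的任务信息与超参数
cfg = get_merged_config()
class_names = cfg.param.class_names

# 写进度: percent 取值 0.0 ~ 1.0, ymir平台据此估计剩余时间
monitor.write_monitor_logger(percent=0.5)

# 写结果文件(以训练任务为例): files 中的文件需位于 /out/models 下
rw.write_model_stage(stage_name='epoch10',
                     files=['epoch10.pt', 'config.py'],
                     mAP=random.random() / 2)

# 任务结束时写入 100% 进度
monitor.write_monitor_logger(percent=1.0)
```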
注意 + /in/env.yaml 中的所有路径均为绝对路径 diff --git a/docs/semantic_segmentation/simple_semantic_seg_infer.md b/docs/semantic_segmentation/simple_semantic_seg_infer.md new file mode 100644 index 0000000..9c41981 --- /dev/null +++ b/docs/semantic_segmentation/simple_semantic_seg_infer.md @@ -0,0 +1,86 @@ +# 制作一个简单的语义分割推理镜像 + +参考[ymir镜像制作简介](../overview/ymir-executor.md) + +## 工作目录 + +``` +cd seg-semantic-demo-tmi +``` + +## 提供超参数模型文件 + +包含**/img-man/infer-template.yaml** 表示镜像支持推理 + +- [img-man/infer-template.yaml](seg-semantic-demo-tmi/img-man/infer-template.yaml) + +指明数据格式 **export_format** 为 **seg-coco:raw** + +```yaml +{!seg-semantic-demo-tmi/img-man/infer-template.yaml!} +``` + +- Dockerfile + +``` +RUN mkdir -p /img-man # 在镜像中生成/img-man目录 +COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件复制到镜像/img-man目录 +``` + +## 提供镜像说明文件 + +**object_type** 为 3 表示镜像支持语义分割 + +- [img-man/manifest.yaml](../../seg-semantic-demo-tmi/img-man/manifest.yaml) +``` +# 3 for semantic segmentation +"object_type": 3 +``` + +- Dockerfile +`COPY img-man/*.yaml /img-man/` 在复制infer-template.yaml的同时,会将manifest.yaml复制到镜像中的**/img-man**目录 + +## 提供默认启动脚本 + +- Dockerfile +``` +RUN echo "python /app/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh +CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh +``` + +## 实现基本功能 + +- [app/start.py](../../seg-semantic-demo-tmi/app/start.py) + +::: seg-semantic-demo-tmi.app.start._run_infer + handler: python + options: + show_root_heading: false + show_source: true + +## 写进度 + +``` +# use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt +logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") +monitor.write_monitor_logger(percent=0.2) + +# real-time monitor +monitor.write_monitor_logger(percent=0.2 + 0.8 * iter / valid_image_count) + +# if task done, write 100% percent log +logging.info('infer done') +monitor.write_monitor_logger(percent=1.0) +``` + +## 写结果文件 + +``` +rw.write_infer_result(infer_result=coco_results, algorithm='segmentation') +``` + +## 制作镜像 demo/semantic_seg:infer + +``` +docker build -t demo/semantic_seg:infer -f Dockerfile . 
+```
diff --git a/docs/semantic_segmentation/simple_semantic_seg_mining.md b/docs/semantic_segmentation/simple_semantic_seg_mining.md
new file mode 100644
index 0000000..bee765a
--- /dev/null
+++ b/docs/semantic_segmentation/simple_semantic_seg_mining.md
@@ -0,0 +1,86 @@
+# 制作一个简单的语义分割挖掘镜像
+
+参考[ymir镜像制作简介](../overview/ymir-executor.md)
+
+## 工作目录
+
+```
+cd seg-semantic-demo-tmi
+```
+
+## 提供超参数模板文件
+
+包含**/img-man/mining-template.yaml** 表示镜像支持挖掘
+
+- [img-man/mining-template.yaml](../../seg-semantic-demo-tmi/img-man/mining-template.yaml)
+
+指明数据格式 **export_format** 为 **seg-coco:raw**
+
+```yaml
+{!seg-semantic-demo-tmi/img-man/mining-template.yaml!}
+```
+
+- Dockerfile
+
+```
+RUN mkdir -p /img-man # 在镜像中生成/img-man目录
+COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件复制到镜像/img-man目录
+```
+
+## 提供镜像说明文件
+
+**object_type** 为 3 表示镜像支持语义分割
+
+- [img-man/manifest.yaml](../../seg-semantic-demo-tmi/img-man/manifest.yaml)
+```
+# 3 for semantic segmentation
+"object_type": 3
+```
+
+- Dockerfile
+`COPY img-man/*.yaml /img-man/` 在复制mining-template.yaml的同时,会将manifest.yaml复制到镜像中的**/img-man**目录
+
+## 提供默认启动脚本
+
+- Dockerfile
+```
+RUN echo "python /app/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh
+CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh
+```
+
+## 实现基本功能
+
+- [app/start.py](../../seg-semantic-demo-tmi/app/start.py)
+
+::: seg-semantic-demo-tmi.app.start._run_mining
+    handler: python
+    options:
+      show_root_heading: false
+      show_source: true
+
+## 写进度
+
+```
+# use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt
+logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}")
+monitor.write_monitor_logger(percent=0.2)
+
+time.sleep(0.1)
+monitor.write_monitor_logger(percent=0.2 + 0.8 * index / valid_image_count)
+
+# if task done, write 100% percent log
+logging.info('mining done')
+monitor.write_monitor_logger(percent=1.0)
+```
+
+## 写结果文件
+
+```
+rw.write_mining_result(mining_result=mining_result)
+```
+
+## 制作镜像 demo/semantic_seg:mining
+
+```
+docker build -t demo/semantic_seg:mining -f Dockerfile .
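+
+# (编者示例) 构建完成后可本地冒烟测试, 此处 ./in ./out 为假设的测试数据目录:
+# docker run --rm -v $PWD/in:/in -v $PWD/out:/out demo/semantic_seg:mining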
+```
diff --git a/docs/semantic_segmentation/simple_semantic_seg_training.md b/docs/semantic_segmentation/simple_semantic_seg_training.md
new file mode 100644
index 0000000..340d4c2
--- /dev/null
+++ b/docs/semantic_segmentation/simple_semantic_seg_training.md
@@ -0,0 +1,93 @@
+# 制作一个简单的语义分割训练镜像
+
+参考[ymir镜像制作简介](../overview/ymir-executor.md)
+
+## 工作目录
+```
+cd seg-semantic-demo-tmi
+```
+
+## 提供超参数模板文件
+
+包含**/img-man/training-template.yaml** 表示镜像支持训练
+
+- [img-man/training-template.yaml](../../seg-semantic-demo-tmi/img-man/training-template.yaml)
+
+指明数据格式 **export_format** 为 **seg-coco:raw**, 即语义/实例分割标注格式。
+
+```
+export_format: 'seg-coco:raw'
+
+# just for test, remove this key in your own docker image
+expected_miou: 0.983 # expected mIoU for training task
+idle_seconds: 3 # idle seconds for each task
+```
+
+- Dockerfile
+
+```
+RUN mkdir -p /img-man # 在镜像中生成/img-man目录
+COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件复制到镜像/img-man目录
+```
+
+## 提供镜像说明文件
+
+**object_type** 为 3 表示镜像支持语义分割
+
+- [img-man/manifest.yaml](../../seg-semantic-demo-tmi/img-man/manifest.yaml)
+```
+# 3 for semantic segmentation
+"object_type": 3
+```
+
+- Dockerfile
+`COPY img-man/*.yaml /img-man/` 在复制training-template.yaml的同时,会将manifest.yaml复制到镜像中的**/img-man**目录
+
+## 提供默认启动脚本
+
+- Dockerfile
+```
+RUN echo "python /app/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh
+CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh
+```
+
+## 实现基本功能
+
+- [app/start.py](../../seg-semantic-demo-tmi/app/start.py)
+
+::: seg-semantic-demo-tmi.app.start._run_training
+    handler: python
+    options:
+      show_root_heading: false
+      show_source: true
+
+## 写进度
+
+```
+if idx % monitor_gap == 0:
+    monitor.write_monitor_logger(percent=0.2 * idx / N)
+
+monitor.write_monitor_logger(percent=0.2)
+
+monitor.write_monitor_logger(percent=1.0)
+```
+
+## 写结果文件
+
+```
+rw.write_model_stage(stage_name='epoch20',
+                     files=['epoch20.pt', 'config.py'],
+                     evaluation_result=dict(mIoU=expected_miou))
+```
+
+## 写tensorboard日志
+
+```
+write_tensorboard_log(cfg.ymir.output.tensorboard_dir)
+```
+
+## 制作镜像 demo/semantic_seg:training
+
+```
+docker build -t demo/semantic_seg:training -f Dockerfile .
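+
+# (编者示例) 构建完成后可本地冒烟测试; 训练任务通常需要显卡, 故加上 --gpus all:
+# docker run --rm --gpus all -v $PWD/in:/in -v $PWD/out:/out demo/semantic_seg:training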
+``` diff --git a/docs/semantic_segmentation/test_semantic_seg.md b/docs/semantic_segmentation/test_semantic_seg.md new file mode 100644 index 0000000..584f064 --- /dev/null +++ b/docs/semantic_segmentation/test_semantic_seg.md @@ -0,0 +1,80 @@ +# 测试Ymir语义分割镜像 + +## 通过YMIR平台进行测试 + +用户可以直接通过Ymir平台发起语义分割的训练,推理及挖掘任务,对镜像进行测试。 + +### 导入待测镜像 + +- 假设用户已经制作好镜像 **demo/semantic_seg:tmi**, 它支持训练、推理及挖掘 + +- 假设用户具有管理员权限,按照[新增镜像](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%96%B0%E5%A2%9E%E9%95%9C%E5%83%8F) 将**demo/semantic_seg:tmi** 添加到 **我的镜像** 中。 + +### 导入待测数据集 + +- 下载示例语义分割数据集 [train-semantic-seg.zip](https://github.com/modelai/ymir-executor-fork/releases/download/dataset-ymir2.0.0/eg100_fgonly_train.zip) [val-semantic-seg.zip](https://github.com/modelai/ymir-executor-fork/releases/download/dataset-ymir2.0.0/eg100_fgonly_val.zip) + +- 建立包含对应标签的项目, `训练类别` 中添加对应标签 `foreground` + +- 按照[添加数据集](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%B7%BB%E5%8A%A0%E6%95%B0%E6%8D%AE%E9%9B%86)导入示例语义分割数据集 + +### 发起待测任务 + +发起待测的训练、推理或挖掘任务后,等待其结束或出错。 + +### 获取任务id + +登录服务器后台,进入YMIR部署的目录 `ymir-workplace` + +- 对于训练任务:`cd sandbox/work_dir/TaskTypeTraining` + +- 对于挖掘或推理任务: `cd sandbox/work_dir/TaskTypeMining` + +- 列举当前所有的任务,按任务时间找到对应任务id, 此处假设为最新的 **t00000020000023a473e1673591617** +``` +> ls -lt . + +drwxr-xr-x 4 root root 45 Jan 13 14:33 t00000020000023a473e1673591617 +drwxr-xr-x 4 root root 45 Jan 13 14:19 t00000020000025d55ff1673590756 +drwxr-xr-x 4 root root 45 Jan 13 14:13 t00000020000028b0cce1673590425 +drwxr-xr-x 4 root root 45 Jan 10 14:09 t00000020000018429301673330944 +drwxr-xr-x 4 root root 45 Jan 9 18:21 t000000200000210e0811673259669 +drwxr-xr-x 4 root root 45 Jan 9 18:07 t00000020000029e02f61673258829 +``` + +### 通过 docker 进行交互式调试 + +- 进行任务id对应的工作目录 `cd t00000020000023a473e1673591617/sub_task/t00000020000023a473e1673591617` + +- 列举当前目录可以看到 `in` 和 `out` 目录 + +- 进行交互式调试 + + - 假设 `ymir-workplace` 存放在 **/data/ymir/ymir-workplace**, 需要将 `ymir-workplace` 目录也挂载到镜像中相同位置,以确保所有软链接均有效。 + + - 假设启动程序为 **/usr/bin/start.sh** + +``` +docker run -it --rm --gpus all --ipc host -v $PWD/in:/in -v $PWD/out:/out -v /data:/data demo/semantic_seg:tmi bash + +bash /usr/bin/start.sh +``` + +- 假设用户开发镜像的代码存放在 **/home/modelai/code**, 为方便测试, 可以将 **/home/modelai/code** 也挂载到镜像中进行测试。 + + - 假设实际启动程序为 **start.py** + +``` +docker run -it --rm --gpus all --ipc host -v $PWD/in:/in -v $PWD/out:/out -v /data:/data -v /home/modelai/code:/home/modelai/code demo/semantic_seg:tmi bash + +cd /home/modelai/code +python start.py +``` + +### 测试通过后 + +- 通过 `docker build` 重新构建镜像, 如果修改了超参数,需要在Ymir平台删除旧镜像并重新添加,使更新的超参数生效。如果仅仅修改了代码,不需要重新添加即可使用本地的最新镜像。 + +## 通过 ymir-executor-verifier 进行测试 + +[ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) 面向企业用户,目的是对大量镜像进行自动化测试,以保障镜像的质量。 diff --git a/docs/ymir-dataset-zh-CN.md b/docs/ymir-dataset-zh-CN.md index 1a61e88..e412e1f 100644 --- a/docs/ymir-dataset-zh-CN.md +++ b/docs/ymir-dataset-zh-CN.md @@ -117,11 +117,11 @@ task_id: t0000001000002ebb7f11653630774 # 任务id ## ymir输入镜像的标注格式 -常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改,其中检测格式 ["det-ark", "det-voc", "det-ls-json"]也可简写为 ["ark", "voc", "ls-json"], 从而兼容ymir1.1.0 。 +常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改,其中检测格式 ["det-ark", "det-voc"]也可简写为 ["ark", "voc"], 从而兼容ymir1.1.0 。 ``` image format: ['raw', 'lmdb'] -annotation format: ["none", "det-ark", 
"det-voc", "det-ls-json", "seg-poly", "seg-mask"] +annotation format: ["ark", "voc", "det-ark", "det-voc", "seg-coco"] ``` ### 默认数据格式 @@ -130,7 +130,7 @@ annotation format: ["none", "det-ark", "det-voc", "det-ls-json", "seg-poly", "se - ymir2.0.0+ 默认检测数据导出格式 `export_format=det-voc:raw`, 标注文件为`xxx.xml` -- ymir2.0.0+ 默认分割数据导出格式 `export_format=seg-mask:raw`, 标注文件为`xxx.png` +- ymir2.0.0+ 默认分割数据导出格式 `export_format=seg-coco:raw`, 标注文件为`xxx.json` ### voc 数据格式 @@ -138,6 +138,4 @@ annotation format: ["none", "det-ark", "det-voc", "det-ls-json", "seg-poly", "se - `export_format=ark:raw` 或 `export_format=det-ark:raw`, 标注文件为`xxx.txt` -- `export_format=seg-mask:raw`, 标注文件为`xxx.png`, 包含`labelmap.txt`, 需要通过`labelmap.txt`将标注图像`xxx.png`从(R, G, B) 映射到`label_id`进行训练。 - -- `export_format=seg-poly:raw`, 多边形标注文件,待定中 +- `export_format=seg-coco:raw`, 标注文件为`xxx.json` diff --git a/mkdocs.yml b/mkdocs.yml index f2f223e..e7765c9 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -9,10 +9,15 @@ plugins: # See: https://mkdocstrings.github.io/python/usage/ python: options: - docstring_style: sphinx + docstring_style: numpy + watch: + - seg-semantic-demo-tmi.app.start markdown_extensions: - markdown_include.include: base_path: . - admonition + - toc: + permalink: "#" +# - sane_lists # nav: # - Home: index.md diff --git a/seg-semantic-demo-tmi/Dockerfile b/seg-semantic-demo-tmi/Dockerfile index e69de29..7481013 100644 --- a/seg-semantic-demo-tmi/Dockerfile +++ b/seg-semantic-demo-tmi/Dockerfile @@ -0,0 +1,42 @@ +# a docker file for an sample training / mining / infer executor + +# FROM ubuntu:20.04 +FROM python:3.8.16 + +ENV LANG=C.UTF-8 + +# Change mirror +RUN sed -i 's#http://archive.ubuntu.com#http://mirrors.ustc.edu.cn#g' /etc/apt/sources.list \ + && sed -i 's#http://security.ubuntu.com#http://mirrors.ustc.edu.cn#g' /etc/apt/sources.list + +# Set timezone +RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ + && echo 'Asia/Shanghai' >/etc/timezone + +# Install linux package +RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ + libgl1-mesa-glx libsm6 libxext6 libxrender-dev \ + build-essential ninja-build \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt ./ +RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple + +WORKDIR /app +# copy user code to WORKDIR +COPY ./app/*.py /app/ + +# copy user config template and manifest.yaml to /img-man +RUN mkdir -p /img-man +COPY img-man/*.yaml /img-man/ + +# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail +ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + +# entry point for your app +# the whole docker image will be started with `nvidia-docker run ` +# and this command will run automatically + +RUN echo "python /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/seg-semantic-demo-tmi/app/pycococreatortools.py b/seg-semantic-demo-tmi/app/pycococreatortools.py new file mode 100644 index 0000000..edf777b --- /dev/null +++ b/seg-semantic-demo-tmi/app/pycococreatortools.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +""" +from https://github.com/waspinator/pycococreator/blob/0.2.1/pycococreatortools/pycococreatortools.py +""" +import datetime +from itertools import groupby + +import numpy as np +from PIL import Image +from pycocotools import mask + + +def resize_binary_mask(array, new_size): + image = Image.fromarray(array.astype(np.uint8) * 255) + image = image.resize(new_size) + return np.asarray(image).astype(np.bool_) + + +def close_contour(contour): + if not 
np.array_equal(contour[0], contour[-1]): + contour = np.vstack((contour, contour[0])) + return contour + + +def binary_mask_to_rle(binary_mask, compress=True): + """ + if compress: + return {'counts': b'', 'size': list(binary_mask.shape)} + else: + return {'counts': [0, 56541, 7, 338, ...], 'size': list(binary_mask.shape)} + """ + if compress: + rle = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8))) + rle['counts'] = rle['counts'].decode('utf-8') + return rle + + rle = {'counts': [], 'size': list(binary_mask.shape)} + counts = rle.get('counts') + for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order='F'))): + if i == 0 and value == 1: + counts.append(0) + counts.append(len(list(elements))) + + return rle + + +def binary_mask_to_polygon(binary_mask, tolerance=0): + """Converts a binary mask to COCO polygon representation + + Args: + binary_mask: a 2D binary numpy array where '1's represent the object + tolerance: Maximum distance from original points of polygon to approximated + polygonal chain. If tolerance is 0, the original coordinate array is returned. + + """ + from skimage import measure + + polygons = [] + # pad mask to close contours of shapes which start and end at an edge + padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0) + contours = measure.find_contours(padded_binary_mask, 0.5) + contours = np.subtract(contours, 1) + for contour in contours: + contour = close_contour(contour) + contour = measure.approximate_polygon(contour, tolerance) + if len(contour) < 3: + continue + contour = np.flip(contour, axis=1) + segmentation = contour.ravel().tolist() + # after padding and subtracting 1 we may get -0.5 points in our segmentation + segmentation = [0 if i < 0 else i for i in segmentation] + polygons.append(segmentation) + + return polygons + + +def create_image_info(image_id, + file_name, + image_size, + date_captured=datetime.datetime.utcnow().isoformat(' '), + license_id=1, + coco_url="", + flickr_url=""): + + image_info = { + "id": image_id, + "file_name": file_name, + "width": image_size[0], + "height": image_size[1], + "date_captured": date_captured, + "license": license_id, + "coco_url": coco_url, + "flickr_url": flickr_url + } + + return image_info + + +def create_annotation_info(annotation_id, + image_id, + category_info, + binary_mask, + image_size=None, + tolerance=2, + bounding_box=None): + + if image_size is not None: + binary_mask = resize_binary_mask(binary_mask, image_size) + + binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8))) + + area = mask.area(binary_mask_encoded) + if area < 1: + return None + + if bounding_box is None: + bounding_box = mask.toBbox(binary_mask_encoded) + + if category_info["is_crowd"]: + is_crowd = 1 + # segmentation = binary_mask_to_rle(binary_mask) + segmentation = binary_mask_encoded + # avoid TypeError: Object of type bytes is not JSON serializable + segmentation['counts'] = segmentation['counts'].decode('utf-8') + else: + is_crowd = 0 + segmentation = binary_mask_to_polygon(binary_mask, tolerance) + if not segmentation: + return None + + annotation_info = { + "id": annotation_id, + "image_id": image_id, + "category_id": category_info["id"], + "iscrowd": is_crowd, + "area": area.tolist(), + "bbox": bounding_box.tolist(), + "segmentation": segmentation, + "width": binary_mask.shape[1], + "height": binary_mask.shape[0], + } + + return annotation_info diff --git a/seg-semantic-demo-tmi/app/result_to_coco.py 
b/seg-semantic-demo-tmi/app/result_to_coco.py new file mode 100644 index 0000000..5346320 --- /dev/null +++ b/seg-semantic-demo-tmi/app/result_to_coco.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 + +import datetime +import os.path as osp +from typing import Dict, List + +import imagesize +import numpy as np +from easydict import EasyDict as edict +from tqdm import tqdm + +import pycococreatortools + +INFO = { + "description": "Example Dataset", + "url": "https://github.com/waspinator/pycococreator", + "version": "0.1.0", + "year": 2022, + "contributor": "ymir", + "date_created": datetime.datetime.utcnow().isoformat(' ') +} + +LICENSES = [{ + "id": 1, + "name": "Attribution-NonCommercial-ShareAlike License", + "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/" +}] + +CATEGORIES = [ + { + 'id': 1, + 'name': 'square', + 'supercategory': 'shape', + }, + { + 'id': 2, + 'name': 'circle', + 'supercategory': 'shape', + }, + { + 'id': 3, + 'name': 'triangle', + 'supercategory': 'shape', + }, +] + + +def convert(ymir_cfg: edict, results: List[Dict], with_blank_area: bool): + """ + convert ymir infer result to coco instance segmentation format + the mask is encode in compressed rle + the is_crowd is True + """ + class_names = ymir_cfg.param.class_names + + categories = [] + # categories should start from 0 + for idx, name in enumerate(class_names): + categories.append(dict(id=idx, name=name, supercategory='none')) + + coco_output = {"info": INFO, "licenses": LICENSES, "categories": categories, "images": [], "annotations": []} + + image_id = 1 + annotation_id = 1 + + for idx, d in enumerate(tqdm(results, desc='convert result to coco')): + image_f = d['image'] + result = d['result'] + + width, height = imagesize.get(image_f) + + image_info = pycococreatortools.create_image_info(image_id=image_id, + file_name=osp.basename(image_f), + image_size=(width, height)) + + coco_output["images"].append(image_info) # type: ignore + + # category_id === class_id start from 0 + unique_ids = np.unique(result) + for np_class_id in unique_ids: + if with_blank_area: + class_id = int(np_class_id) - 1 + else: + class_id = int(np_class_id) + + # remove background class in infer-result + if with_blank_area and class_id < 0: + continue + + assert class_id < len(class_names), f'class_id {class_id} must < class_num {len(class_names)}' + category_info = {'id': class_id, 'is_crowd': True} + binary_mask = result == np_class_id + annotation_info = pycococreatortools.create_annotation_info(annotation_id, + image_id, + category_info, + binary_mask, + tolerance=2) + + if annotation_info is not None: + coco_output["annotations"].append(annotation_info) # type: ignore + annotation_id = annotation_id + 1 + + image_id += 1 + + return coco_output diff --git a/seg-semantic-demo-tmi/app/start.py b/seg-semantic-demo-tmi/app/start.py index dd51caa..a3c2b78 100644 --- a/seg-semantic-demo-tmi/app/start.py +++ b/seg-semantic-demo-tmi/app/start.py @@ -5,14 +5,16 @@ import time from typing import List -# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail -os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') +import cv2 +import numpy as np from easydict import EasyDict as edict from tensorboardX import SummaryWriter from ymir_exc import monitor from ymir_exc import result_writer as rw from ymir_exc.util import get_merged_config +from result_to_coco import convert + def start() -> int: cfg = get_merged_config() @@ -28,19 +30,20 @@ def start() -> int: def _run_training(cfg: edict) -> None: - """ - 
sample function of training, which shows: - 1. how to get config file - 2. how to read training and validation datasets - 3. how to write logs - 4. how to write training result + """sample function of training + + which shows: + - how to get config file + - how to read training and validation datasets + - how to write logs + - how to write training result """ # use `env.get_executor_config` to get config file for training - gpu_id: str = cfg.param.get(key='gpu_id') - class_names: List[str] = cfg.param.get(key='class_names') - expected_miou: float = cfg.param.get(key='expected_miou', default=0.6) - idle_seconds: float = cfg.param.get(key='idle_seconds', default=60) - trigger_crash: bool = cfg.param.get(key='trigger_crash', default=False) + gpu_id: str = cfg.param.get('gpu_id') + class_names: List[str] = cfg.param.get('class_names') + expected_miou: float = cfg.param.get('expected_miou', 0.6) + idle_seconds: float = cfg.param.get('idle_seconds', 60) + trigger_crash: bool = cfg.param.get('trigger_crash', False) # use `logging` or `print` to write log to console # notice that logging.basicConfig is invoked at executor.env logging.info(f'gpu device: {gpu_id}') @@ -110,8 +113,8 @@ def _run_training(cfg: edict) -> None: def _run_mining(cfg: edict) -> None: # use `cfg.param` to get config file for training # pretrained models in `cfg.ymir.input.models_dir` - gpu_id: str = cfg.param.get(key='gpu_id') - class_names: List[str] = cfg.param.get(key='class_names') + gpu_id: str = cfg.param.get('gpu_id') + class_names: List[str] = cfg.param.get('class_names') idle_seconds: float = cfg.param.get('idle_seconds', 60) trigger_crash: bool = cfg.param.get('trigger_crash', False) # use `logging` or `print` to write log to console @@ -141,7 +144,12 @@ def _run_mining(cfg: edict) -> None: # write mining result # here we give a fake score to each assets total_length = len(valid_images) - mining_result = [(asset_path, index / total_length) for index, asset_path in enumerate(valid_images)] + mining_result = [] + for index, asset_path in enumerate(valid_images): + mining_result.append((asset_path, index / total_length)) + time.sleep(0.1) + monitor.write_monitor_logger(percent=0.2 + 0.8 * index / valid_image_count) + rw.write_mining_result(mining_result=mining_result) # if task done, write 100% percent log @@ -181,21 +189,25 @@ def _run_infer(cfg: edict) -> None: _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) # write infer result - fake_anns = [] random.seed(seed) - for class_name in class_names: - x = random.randint(0, 100) - y = random.randint(0, 100) - w = random.randint(50, 100) - h = random.randint(50, 100) - ann = rw.Annotation(class_name=class_name, score=random.random(), box=rw.Box(x=x, y=y, w=w, h=h)) - - fake_anns.append(ann) - - infer_result = {asset_path: fake_anns for asset_path in valid_images} - for asset_path in invalid_images: - infer_result[asset_path] = [] - rw.write_infer_result(infer_result=infer_result) + results = [] + + fake_mask_num = min(len(class_names), 10) + for iter, img_file in enumerate(valid_images): + img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE) + mask = np.zeros(shape=img.shape[0:2], dtype=np.uint8) + for idx in range(fake_mask_num): + percent = 100 * idx / fake_mask_num + value = np.percentile(img, percent) + mask[img > value] = idx + 1 + + results.append(dict(image=img_file, result=mask)) + + # real-time monitor + monitor.write_monitor_logger(percent=0.2 + 0.8 * iter / valid_image_count) + + coco_results = convert(cfg, results, True) + 
rw.write_infer_result(infer_result=coco_results, algorithm='segmentation') # if task done, write 100% percent log logging.info('infer done') diff --git a/seg-semantic-demo-tmi/fast.Dockerfile b/seg-semantic-demo-tmi/fast.Dockerfile new file mode 100644 index 0000000..4f0ab11 --- /dev/null +++ b/seg-semantic-demo-tmi/fast.Dockerfile @@ -0,0 +1,19 @@ +FROM youdaoyzbx/ymir-executor:ymir2.0.2-seg-semantic-demo-base + +WORKDIR /app +# copy user code to WORKDIR +COPY ./app/*.py /app/ + +# copy user config template and manifest.yaml to /img-man +RUN mkdir -p /img-man +COPY img-man/*.yaml /img-man/ + +# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail +ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + +# entry point for your app +# the whole docker image will be started with `nvidia-docker run ` +# and this command will run automatically + +RUN echo "python /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/seg-semantic-demo-tmi/requirements.txt b/seg-semantic-demo-tmi/requirements.txt index 20103d3..708647b 100644 --- a/seg-semantic-demo-tmi/requirements.txt +++ b/seg-semantic-demo-tmi/requirements.txt @@ -1,4 +1,11 @@ +pycocotools pydantic>=1.8.2 pyyaml>=5.4.1 tensorboardX>=2.4 -#ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0 +numpy +opencv-python>=4.0 +pillow +imagesize +tqdm +easydict +ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir2.1.0 From b614e8b7f76ab6f12e42ab4f2401f89e75e7d0cb Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 13 Jan 2023 16:54:57 +0800 Subject: [PATCH 185/204] add readthedocs --- README.md | 8 +------- README_zh-CN.md | 8 +------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 1155c50..994159b 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,7 @@ - [Image Community](http://pubimg.vesionbook.com:8110/img) search and share open source. -- [ymir executor](./docs/official-docker-image.md) - - - [develop tutorial](./docs/README.MD) from zero to one, build you ymir-executor - - - [user survey](https://www.wjx.cn/vm/eKFm2aq.aspx#) help ymir team to write document and develop, user feedback are collected on a continuous and long-term basis. 
-
-- [ymir mining algorithm](./docs/mining-images-overview.md)
+- [ymir executor Documentation](https://ymir-executor-fork.readthedocs.io/zh/latest/#)
 
 ## overview
 
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 12ba47e..35f1316 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -6,13 +6,7 @@
 
 - [镜像社区](http://pubimg.vesionbook.com:8110/img) 可搜索到所有公开的ymir算法镜像, 同时可共享其他人发布的镜像。
 
-- [ymir镜像](./docs/official-docker-image.md)
-
-  - [镜像开发者:制作教程](./docs/README.MD) 从零到一,搭建自己的 ymir-executor
-
-  - [用户调查](https://www.wjx.cn/vm/eKFm2aq.aspx#) 帮助ymir团队开发文档与新功能,此调查长期有效, 定期收集
-
-- [ymir 挖掘算法](./docs/mining-images-overview.md)
+- [ymir镜像文档](https://ymir-executor-fork.readthedocs.io/zh/latest/#)
 
 ## 比较
 
From 2d2645872f305656953eef457f953d92a896c4c5 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Sat, 28 Jan 2023 11:23:27 +0800
Subject: [PATCH 186/204] add simple det doc

---
 det-demo-tmi/app/start.py                    |  8 +-
 det-demo-tmi/img-man/manifest.yaml           |  2 +-
 det-demo-tmi/img-man/training-template.yaml  |  2 +-
 docs/object_detection/simple_det_infer.md    | 85 ++++++++++++++
 docs/object_detection/simple_det_mining.md   | 85 ++++++++++++++
 docs/object_detection/simple_det_training.md | 98 +++++++++++++++++++
 .../simple_semantic_seg_infer.md             | 16 +--
 .../simple_semantic_seg_mining.md            | 16 +--
 .../simple_semantic_seg_training.md          | 24 ++--
 seg-semantic-demo-tmi/img-man/manifest.yaml  |  2 +-
 10 files changed, 307 insertions(+), 31 deletions(-)

diff --git a/det-demo-tmi/app/start.py b/det-demo-tmi/app/start.py
index d961551..b9d17a7 100644
--- a/det-demo-tmi/app/start.py
+++ b/det-demo-tmi/app/start.py
@@ -81,7 +81,9 @@ def _run_training(cfg: edict) -> None:
     with open(os.path.join(models_dir, 'config.py'), 'w') as f:
         f.write('fake model config file')
     # use `rw.write_model_stage` to save training result
-    rw.write_model_stage(stage_name='epoch10', files=['epoch10.pt', 'config.py'], mAP=random.random() / 2)
+    rw.write_model_stage(stage_name='epoch10',
+                         files=['epoch10.pt', 'config.py'],
+                         evaluation_result=dict(mAP=random.random() / 2))
 
     _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash)
 
@@ -91,7 +93,9 @@ def _run_training(cfg: edict) -> None:
         f.write('fake model weight')
     with open(os.path.join(models_dir, 'config.py'), 'w') as f:
         f.write('fake model config file')
-    rw.write_model_stage(stage_name='epoch20', files=['epoch20.pt', 'config.py'], mAP=expected_mAP)
+    rw.write_model_stage(stage_name='epoch20',
+                         files=['epoch20.pt', 'config.py'],
+                         evaluation_result=dict(mAP=expected_mAP))
 
     # if task done, write 100% percent log
     logging.info('training done')
diff --git a/det-demo-tmi/img-man/manifest.yaml b/det-demo-tmi/img-man/manifest.yaml
index 73c21d2..3353f64 100644
--- a/det-demo-tmi/img-man/manifest.yaml
+++ b/det-demo-tmi/img-man/manifest.yaml
@@ -1,2 +1,2 @@
-# object_type: 2 if this docker image is training, mining or infer for detection, 3 for semantic segmentation, default: 2
+# object_type: 2 for object detection, 3 for semantic segmentation, default: 2
 "object_type": 2
diff --git a/det-demo-tmi/img-man/training-template.yaml b/det-demo-tmi/img-man/training-template.yaml
index f72c2b5..ac88de3 100644
--- a/det-demo-tmi/img-man/training-template.yaml
+++ b/det-demo-tmi/img-man/training-template.yaml
@@ -6,7 +6,7 @@
 # task_id: 'default-training-task'
 # pretrained_model_params: []
 # class_names: []
-export_format: 'det-voc:raw'
+export_format: 'det-ark:raw'
 
 # just for test, remove this key in your own docker image
 expected_map: 0.983 # expected map for training task
diff --git a/docs/object_detection/simple_det_infer.md b/docs/object_detection/simple_det_infer.md
index e69de29..eff27c3 100644
--- a/docs/object_detection/simple_det_infer.md
+++ b/docs/object_detection/simple_det_infer.md
@@ -0,0 +1,85 @@
+# 制作一个简单的目标检测推理镜像
+
+参考[ymir镜像制作简介](../overview/ymir-executor.md)
+
+## 工作目录
+```
+cd det-demo-tmi
+```
+
+## 提供超参数模型文件
+
+镜像中包含**/img-man/infer-template.yaml** 表示镜像支持推理
+
+- [img-man/infer-template.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/infer-template.yaml)
+
+```yaml
+{!det-demo-tmi/img-man/infer-template.yaml!}
+```
+
+- [Dockerfile](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/Dockerfile)
+
+```
+RUN mkdir -p /img-man # 在镜像中生成/img-man目录
+COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件复制到镜像/img-man目录
+```
+
+## 提供镜像说明文件
+
+**object_type** 为 2 表示镜像支持目标检测
+
+- [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/manifest.yaml)
+```
+# 2 for object detection
+"object_type": 2
+```
+
+- Dockerfile
+`COPY img-man/*.yaml /img-man/` 在复制infer-template.yaml的同时,会将manifest.yaml复制到镜像中的**/img-man**目录
+
+## 提供默认启动脚本
+
+- Dockerfile
+```
+RUN echo "python /app/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh
+CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh
+```
+
+## 实现基本功能
+
+- [app/start.py](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/app/start.py)
+
+::: det-demo-tmi.app.start._run_infer
+    handler: python
+    options:
+      show_root_heading: false
+      show_source: true
+
+
+## 写进度
+
+```
+# use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt
+logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}")
+monitor.write_monitor_logger(percent=0.2)
+
+# if task done, write 100% percent log
+logging.info('infer done')
+monitor.write_monitor_logger(percent=1.0)
+```
+
+## 写结果文件
+
+```
+rw.write_infer_result(infer_result=coco_results, algorithm='detection')
+```
+
+## 制作镜像 demo/det:infer
+
+```dockerfile
+{!det-demo-tmi/Dockerfile!}
+```
+
+```
+docker build -t demo/det:infer -f Dockerfile .
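+# Illustrative extra step, not part of the original doc: verify the
+# object_type in the manifest before adding the image to YMIR
+docker run --rm demo/det:infer cat /img-man/manifest.yaml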
+``` diff --git a/docs/object_detection/simple_det_mining.md b/docs/object_detection/simple_det_mining.md index e69de29..100d1c4 100644 --- a/docs/object_detection/simple_det_mining.md +++ b/docs/object_detection/simple_det_mining.md @@ -0,0 +1,85 @@ +# 制作一个简单的目标检测挖掘镜像 + +参考[ymir镜像制作简介](../overview/ymir-executor.md) + +## 工作目录 + +``` +cd det-demo-tmi +``` + +## 提供超参数模型文件 + +镜像中包含**/img-man/mining-template.yaml** 表示镜像支持挖掘 + +- [img-man/mining-template.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/mining-template.yaml) + +```yaml +{!det-demo-tmi/img-man/mining-template.yaml!} +``` + +- [Dockerfile](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/Dockerfile) + +``` +RUN mkdir -p /img-man # 在镜像中生成/img-man目录 +COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件复制到镜像/img-man目录 +``` + +## 提供镜像说明文件 + +**object_type** 为 2 表示镜像支持目标检测 + +- [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/manifest.yaml) +``` +# 2 for object detection +"object_type": 2 +``` + +- Dockerfile +`COPY img-man/*.yaml /img-man/` 在复制mining-template.yaml的同时,会将manifest.yaml复制到镜像中的**/img-man**目录 + +## 提供默认启动脚本 + +- Dockerfile +``` +RUN echo "python /app/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh +CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh +``` + +## 实现基本功能 + +- [app/start.py](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/app/start.py) + +::: det-demo-tmi.app.start._run_mining + handler: python + options: + show_root_heading: false + show_source: true + +## 写进度 + +``` +# use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt +logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") +monitor.write_monitor_logger(percent=0.2) + +# if task done, write 100% percent log +logging.info('mining done') +monitor.write_monitor_logger(percent=1.0) +``` + +## 写结果文件 + +``` +rw.write_mining_result(mining_result=mining_result) +``` + +## 制作镜像 demo/det:mining + +```dockerfile +{!det-demo-tmi/Dockerfile!} +``` + +``` +docker build -t demo/det:mining -f Dockerfile . 
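+# Illustrative extra step, not part of the original doc: check the mining
+# hyper-parameter template that YMIR will show on its hyper-parameter page
+docker run --rm demo/det:mining cat /img-man/mining-template.yaml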
+```
diff --git a/docs/object_detection/simple_det_training.md b/docs/object_detection/simple_det_training.md
index e69de29..321c977 100644
--- a/docs/object_detection/simple_det_training.md
+++ b/docs/object_detection/simple_det_training.md
@@ -0,0 +1,98 @@
+# 制作一个简单的目标检测训练镜像
+
+参考[ymir镜像制作简介](../overview/ymir-executor.md)
+
+## 工作目录
+```
+cd det-demo-tmi
+```
+
+## 提供超参数模型文件
+
+镜像中包含**/img-man/training-template.yaml** 表示镜像支持训练
+
+- [img-man/training-template.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/training-template.yaml)
+
+指明数据格式 **export_format** 为 **det-ark:raw**, 即目标检测标注格式,详情参考[Ymir镜像数据集格式](../overview/dataset-format.md)
+
+```yaml
+{!det-demo-tmi/img-man/training-template.yaml!}
+```
+
+- [Dockerfile](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/Dockerfile)
+
+```
+RUN mkdir -p /img-man # 在镜像中生成/img-man目录
+COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件复制到镜像/img-man目录
+```
+
+## 提供镜像说明文件
+
+**object_type** 为 2 表示镜像支持目标检测
+
+- [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/manifest.yaml)
+```
+# 2 for object detection
+"object_type": 2
+```
+
+- Dockerfile
+`COPY img-man/*.yaml /img-man/` 在复制training-template.yaml的同时,会将manifest.yaml复制到镜像中的**/img-man**目录
+
+## 提供默认启动脚本
+
+- Dockerfile
+```
+RUN echo "python /app/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh
+CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh
+```
+
+## 实现基本功能
+
+- [app/start.py](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/app/start.py)
+
+::: det-demo-tmi.app.start._run_training
+    handler: python
+    options:
+      show_root_heading: false
+      show_source: true
+
+## 写进度
+
+```
+if idx % monitor_gap == 0:
+    monitor.write_monitor_logger(percent=0.2 * idx / N)
+
+monitor.write_monitor_logger(percent=0.2)
+
+monitor.write_monitor_logger(percent=1.0)
+```
+
+## 写结果文件
+
+```
+# use `rw.write_model_stage` to save training result
+rw.write_model_stage(stage_name='epoch10',
+                     files=['epoch10.pt', 'config.py'],
+                     evaluation_result=dict(mAP=random.random() / 2))
+
+rw.write_model_stage(stage_name='epoch20',
+                     files=['epoch20.pt', 'config.py'],
+                     evaluation_result=dict(mAP=expected_mAP))
+```
+
+## 写tensorboard日志
+
+```
+write_tensorboard_log(cfg.ymir.output.tensorboard_dir)
+```
+
+## 制作镜像 demo/det:training
+
+```dockerfile
+{!det-demo-tmi/Dockerfile!}
+```
+
+```
+docker build -t demo/det:training -f Dockerfile .
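+# Illustrative extra step, not part of the original doc: confirm the training
+# template requests the det-ark:raw export format (assumes grep exists in the image)
+docker run --rm demo/det:training grep export_format /img-man/training-template.yaml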
+``` diff --git a/docs/semantic_segmentation/simple_semantic_seg_infer.md b/docs/semantic_segmentation/simple_semantic_seg_infer.md index 9c41981..e5c7c63 100644 --- a/docs/semantic_segmentation/simple_semantic_seg_infer.md +++ b/docs/semantic_segmentation/simple_semantic_seg_infer.md @@ -10,17 +10,15 @@ cd seg-semantic-demo-tmi ## 提供超参数模型文件 -包含**/img-man/infer-template.yaml** 表示镜像支持推理 +镜像中包含**/img-man/infer-template.yaml** 表示镜像支持推理 -- [img-man/infer-template.yaml](seg-semantic-demo-tmi/img-man/infer-template.yaml) - -指明数据格式 **export_format** 为 **seg-coco:raw** +- [img-man/infer-template.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/img-man/infer-template.yaml) ```yaml {!seg-semantic-demo-tmi/img-man/infer-template.yaml!} ``` -- Dockerfile +- [Dockerfile](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/Dockerfile) ``` RUN mkdir -p /img-man # 在镜像中生成/img-man目录 @@ -31,7 +29,7 @@ COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件 **object_type** 为 3 表示镜像支持语义分割 -- [img-man/manifest.yaml](../../seg-semantic-demo-tmi/img-man/manifest.yaml) +- [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/img-man/manifest.yaml) ``` # 3 for semantic segmentation "object_type": 3 @@ -50,7 +48,7 @@ CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/s ## 实现基本功能 -- [app/start.py](../../seg-semantic-demo-tmi/app/start.py) +- [app/start.py](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/app/start.py) ::: seg-semantic-demo-tmi.app.start._run_infer handler: python @@ -81,6 +79,10 @@ rw.write_infer_result(infer_result=coco_results, algorithm='segmentation') ## 制作镜像 demo/semantic_seg:infer +```dockerfile +{!seg-semantic-demo-tmi/Dockerfile!} +``` + ``` docker build -t demo/semantic_seg:infer -f Dockerfile . 
``` diff --git a/docs/semantic_segmentation/simple_semantic_seg_mining.md b/docs/semantic_segmentation/simple_semantic_seg_mining.md index bee765a..cc4e9e4 100644 --- a/docs/semantic_segmentation/simple_semantic_seg_mining.md +++ b/docs/semantic_segmentation/simple_semantic_seg_mining.md @@ -10,17 +10,15 @@ cd seg-semantic-demo-tmi ## 提供超参数模型文件 -包含**/img-man/mining-template.yaml** 表示镜像支持挖掘 +镜像中包含**/img-man/mining-template.yaml** 表示镜像支持挖掘 -- [img-man/mining-template.yaml](seg-semantic-demo-tmi/img-man/mining-template.yaml) - -指明数据格式 **export_format** 为 **seg-coco:raw** +- [img-man/mining-template.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/img-man/mining-template.yaml) ```yaml {!seg-semantic-demo-tmi/img-man/mining-template.yaml!} ``` -- Dockerfile +- [Dockerfile](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/Dockerfile) ``` RUN mkdir -p /img-man # 在镜像中生成/img-man目录 @@ -31,7 +29,7 @@ COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件 **object_type** 为 3 表示镜像支持语义分割 -- [img-man/manifest.yaml](../../seg-semantic-demo-tmi/img-man/manifest.yaml) +- [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/img-man/manifest.yaml) ``` # 3 for semantic segmentation "object_type": 3 @@ -50,7 +48,7 @@ CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/s ## 实现基本功能 -- [app/start.py](../../seg-semantic-demo-tmi/app/start.py) +- [app/start.py](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/app/start.py) ::: seg-semantic-demo-tmi.app.start._run_mining handler: python @@ -81,6 +79,10 @@ rw.write_mining_result(mining_result=mining_result) ## 制作镜像 demo/semantic_seg:mining +```dockerfile +{!seg-semantic-demo-tmi/Dockerfile!} +``` + ``` docker build -t demo/semantic_seg:mining -f Dockerfile . 
``` diff --git a/docs/semantic_segmentation/simple_semantic_seg_training.md b/docs/semantic_segmentation/simple_semantic_seg_training.md index 340d4c2..3dc917e 100644 --- a/docs/semantic_segmentation/simple_semantic_seg_training.md +++ b/docs/semantic_segmentation/simple_semantic_seg_training.md @@ -9,21 +9,17 @@ cd seg-semantic-demo-tmi ## 提供超参数模型文件 -包含**/img-man/training-template.yaml** 表示镜像支持训练 +镜像中包含**/img-man/training-template.yaml** 表示镜像支持训练 -- [img-man/training-template.yaml](seg-semantic-demo-tmi/img-man/training-template.yaml) +- [img-man/training-template.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/img-man/training-template.yaml) -指明数据格式 **export_format** 为 **seg-coco:raw**, 即语义/实例分割标注格式。 +指明数据格式 **export_format** 为 **seg-coco:raw**, 即语义/实例分割标注格式,详情参考[Ymir镜像数据集格式](../overview/dataset-format.md) +```yaml +{!seg-semantic-demo-tmi/img-man/training-template.yaml!} ``` -export_format: 'seg-coco:raw' -# just for test, remove this key in your own docker image -expected_miou: 0.983 # expected mIoU for training task -idle_seconds: 3 # idle seconds for each task -``` - -- Dockerfile +- [Dockerfile](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/Dockerfile) ``` RUN mkdir -p /img-man # 在镜像中生成/img-man目录 @@ -34,7 +30,7 @@ COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件 **object_type** 为 3 表示镜像支持语义分割 -- [img-man/manifest.yaml](../../seg-semantic-demo-tmi/img-man/manifest.yaml) +- [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/img-man/manifest.yaml) ``` # 3 for semantic segmentation "object_type": 3 @@ -53,7 +49,7 @@ CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/s ## 实现基本功能 -- [app/start.py](../../seg-semantic-demo-tmi/app/start.py) +- [app/start.py](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-semantic-demo-tmi/app/start.py) ::: seg-semantic-demo-tmi.app.start._run_training handler: python @@ -88,6 +84,10 @@ write_tensorboard_log(cfg.ymir.output.tensorboard_dir) ## 制作镜像 demo/semantic_seg:training +```dockerfile +{!seg-semantic-demo-tmi/Dockerfile!} +``` + ``` docker build -t demo/semantic_seg:training -f Dockerfile . 
``` diff --git a/seg-semantic-demo-tmi/img-man/manifest.yaml b/seg-semantic-demo-tmi/img-man/manifest.yaml index 633c2b2..1aadd8e 100644 --- a/seg-semantic-demo-tmi/img-man/manifest.yaml +++ b/seg-semantic-demo-tmi/img-man/manifest.yaml @@ -1,2 +1,2 @@ -# object_type: 2 if this docker image is training, mining or infer for detection, 3 for semantic segmentation, default: 2 +# object_type: 2 for object detection, 3 for semantic segmentation, default: 2 "object_type": 3 From fa3d1b435da1c451e7b729c8a005e1ee354ed3c3 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 28 Jan 2023 15:00:55 +0800 Subject: [PATCH 187/204] reorder nav --- docs/README.MD | 36 ++--- docs/cn/README.MD | 7 - docs/docker-image-debug.md | 149 ------------------ docs/object_detection/test_det.md | 98 ++++++++++++ docs/official-docker-image.md | 4 +- docs/overview/dataset-format.md | 136 +++++++++++++++- docs/segmentation.md | 138 ---------------- .../test_semantic_seg.md | 19 ++- docs/ymir-dataset-format.md | 128 --------------- docs/ymir-dataset-zh-CN.md | 141 ----------------- docs/ymir-executor-version.md | 4 +- .../det-detectron2-tmi.md | 0 .../det-mmdet-tmi.md | 0 .../det-nanodet-tmi.md | 0 .../det-vidt-tmi.md | 0 .../det-yolov4-tmi.md | 0 .../det-yolov5-automl-tmi.md | 0 .../det-yolov5-tmi.md | 0 .../det-yolov7-tmi.md | 0 mkdocs.yml | 13 +- 20 files changed, 279 insertions(+), 594 deletions(-) delete mode 100644 docs/cn/README.MD delete mode 100644 docs/docker-image-debug.md create mode 100644 docs/object_detection/test_det.md delete mode 100644 docs/segmentation.md delete mode 100644 docs/ymir-dataset-format.md delete mode 100644 docs/ymir-dataset-zh-CN.md rename docs/{cn/docker_images => ymir_executor}/det-detectron2-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-mmdet-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-nanodet-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-vidt-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-yolov4-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-yolov5-automl-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-yolov5-tmi.md (100%) rename docs/{cn/docker_images => ymir_executor}/det-yolov7-tmi.md (100%) diff --git a/docs/README.MD b/docs/README.MD index 9c2c826..f75a7be 100644 --- a/docs/README.MD +++ b/docs/README.MD @@ -14,9 +14,9 @@ - [镜像社区](http://pubimg.vesionbook.com:8110/img) 下载、发布ymir镜像 -- [历史镜像](./official-docker-image.md) +- 💫[历史镜像](./official-docker-image.md) -- [挖掘算法评测](./mining-images-overview.md) +- 💫[挖掘算法评测](./mining-images-overview.md) ## Ymir镜像说明 @@ -26,21 +26,27 @@ - [Ymir镜像超参数](./overview/hyper-parameter.md) +- [Ymir镜像制作简介](./overview/ymir-executor.md) + ## 目标检测 -- [制作一个简单的检测训练镜像]() +- [制作一个简单的检测训练镜像](./object_detection/simple_det_training.md) + +- [制作一个简单的检测推理镜像](./object_detection/simple_det_infer.md) -- [制作一个简单的检测推理镜像]() +- [制作一个简单的检测挖掘镜像](./object_detection/simple_det_mining.md) -- [制作一个简单的检测挖掘镜像]() +- [测试Ymir目标检测镜像](./object_detection/test_det.md) ## 语义分割 -- [制作一个简单的语义分割训练镜像]() +- [制作一个简单的语义分割训练镜像](./semantic_segmentation/simple_semantic_seg_training.md) + +- [制作一个简单的语义分割推理镜像](./semantic_segmentation/simple_semantic_seg_infer.md) -- [制作一个简单的语义分割推理镜像]() +- [制作一个简单的语义分割挖掘镜像](./semantic_segmentation/simple_semantic_seg_mining.md) -- [制作一个简单的语义分割挖掘镜像]() +- [测试Ymir语义分割镜像](./semantic_segmentation/test_semantic_seg.md) ## 实例分割 @@ -50,12 +56,6 @@ - [制作一个简单的实例分割挖掘镜像]() -## 从零定制 - -- [制作一个简单的多功能检测镜像](../det-demo-tmi/README.md) - -- 
[了解ymir接口与数据结构](./ymir-dataset-zh-CN.md) - ## 基于已有镜像进行定制 - [增/删/改: 默认超参数](./hyper-parameter.md) @@ -64,13 +64,7 @@ - [yolov5示例](https://github.com/modelai/ymir-yolov5/pull/2/files) -## 镜像调试 - -- [交互式调试](./docker-image-debug.md) - -- [通过ymir-executor-verifier进行镜像校验](https://github.com/modelai/ymir-executor-verifier) - -## 生态环境 +## 💫 生态环境 - [ymir镜像开发SDK](https://github.com/modelai/ymir-executor-sdk) diff --git a/docs/cn/README.MD b/docs/cn/README.MD deleted file mode 100644 index 9283d78..0000000 --- a/docs/cn/README.MD +++ /dev/null @@ -1,7 +0,0 @@ -# 中文说明文档 - -此处存放中文文档 - -## 镜像社区 - -- [镜像社区](http://pubimg.vesionbook.com:8110/img) 可搜索到所有公开的ymir算法镜像, 同时可共享其他人发布的镜像。 diff --git a/docs/docker-image-debug.md b/docs/docker-image-debug.md deleted file mode 100644 index a8602f8..0000000 --- a/docs/docker-image-debug.md +++ /dev/null @@ -1,149 +0,0 @@ -# docker 镜像调试 - -假设所有本地代码放在 $HOME/code 下, 以 `youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi` 为例 - -## 基于 voc dog 数据集进行调试 - -1. 下载数据集并进行预处理, [参考脚本](https://github.com/modelai/ymir-executor-verifier/blob/main/start.sh) - -``` -set -e - -echo "download voc dog dataset" -wget https://github.com/modelai/ymir-executor-fork/releases/download/dataset/voc_dog_debug_sample.zip -O voc_dog_debug_sample.zip - -echo "unzip voc dog dataset" -unzip voc_dog_debug_sample.zip - -echo "generate candidate-index.tsv for mining and infer" -cd voc_dog/in && cat val-index.tsv | awk '{print $1}' > candidate-index.tsv - -echo "download env.yaml" -wget https://raw.githubusercontent.com/modelai/ymir-executor-verifier/main/tests/configs/env.yaml -O env.yaml - -echo "download demo yolov5 config.yaml" -wget https://raw.githubusercontent.com/modelai/ymir-executor-verifier/main/tests/configs/config.yaml -``` - -得到以下目录,注意根据训练镜像修改 `config.yaml` - -``` -voc_dog -├── in -│   ├── annotations [标注文件夹] -│   │   ├── train [ymir后台不会按照train/val进行文件夹划分,请按train-index.tsv获取训练集] -│   │   └── val [ymir后台不会按照train/val进行文件夹划分,请按val-index.tsv获取测试集] -│   ├── assets [图片文件夹] -│   │   ├── train [ymir后台不会按照train/val进行文件夹划分,请按train-index.tsv获取训练集] -│   │   └── val [ymir后台不会按照train/val进行文件夹划分,请按val-index.tsv获取训练集] -│   ├── candidate-index.tsv -│   ├── config.yaml [ymir后台提供的超参数文件] -│   ├── env.yaml [ymir后台提供的路径文件] -│   ├── models [ymir后台提供的预训练模型存放目录] -│   ├── train-index.tsv -│   └── val-index.tsv -└── out [12 entries exceeds filelimit, not opening dir] - -9 directories, 5 files -``` - -2. 交互式启动镜像并运行 `$HOME/code/start.py` 文件进行调试 - -``` -docker run -it --gpus all --shm-size 128G -v $PWD/voc_dog/in:/in -v $PWD/voc_dog/out:/out -v $HOME/code:/code youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi bash - -cd /code -python start.py -``` - -## 基于 ymir 发起的训练任务进行调试 - -假设某个训练任务失败了, 可通过tensorboard获取对应的 task_id 与 ymir-workspace. - -1. 进入项目的模型管理,跳转到失败任务的训练过程 -![](./failed_training_task.png) - -2. 获取失败任务的 `task_id: t0000001000002dbc2291666595529` 与 ymir后台工作目录 `ymir-workspace: /home/intellif/backup/ymir/ymir-workplace` -![](./failed_tensorboard_task_id.png) - -3. 进行失败任务的目录 -``` -YMIR_WORKDIR=/home/intellif/backup/ymir/ymir-workplace -TASK_ID=t0000001000002dbc2291666595529 -cd $YMIR_WORKDIR/sandbox/work_dir/TaskTypeTraining/$TASK_ID/sub_task/$TASK_ID -ls - -# 将输出 in out task_config.yaml -``` - -4. 
其中 in 有以下目录结构:
-```
-in
-├── annotations
-│   └── index.tsv
-├── assets -> /home/intellif/backup/ymir/ymir-workplace/sandbox/0001/asset_cache
-├── config.yaml
-├── env.yaml
-├── models
-├── prediction
-├── predictions
-│   └── index.tsv
-├── pred-test-index.tsv
-├── pred-train-index.tsv
-├── pred-val-index.tsv
-├── test-index.tsv
-├── train-index.tsv
-└── val-index.tsv
-```
-
-5. 其中 out 有以下文件:
-
-```
-out
-├── monitor.txt
-├── tensorboard -> /home/intellif/backup/ymir/ymir-workplace/ymir-tensorboard-logs/0001/t0000001000002dbc2291666595529
-└── ymir-executor-out.log
-```
-
-6. 交互式启动镜像进行调试
-
-注:其中`in/assets` 为软链接,为确保在镜像中该软链接有效,需要将 `ymir-workspace` 挂载到镜像中对应位置
-
-```
-docker run -it --gpus all --shm-size 128G -v $PWD/in:/in -v $PWD/out:/out -v $YMIR_WORKDIR:$YMIR_WORKDIR -v $HOME/code:/code youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi bash
-```
-
-7. 推理与挖掘镜像调试同理,注意对应目录均为 `ymir-workplace/sandbox/work_dir/TaskTypeMining`
-
-## 调试完成后构建新镜像
-
-- 准备 `zzz.dockerfile`
-
-```
-FROM youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi
-
-# 当$HOME/code目录下的代码文件复制到/app
-WORKDIR /app
-COPY . /app/
-
-# 如果更新了超参数配置文件,复制到/img-man
-# COPY ./img-man/*.yaml /img-man/
-
-# 如果改变了入口函数, 对应修改 /usr/bin/start.sh的内容
-RUN echo "python3 /app/start.py" > /usr/bin/start.sh
-CMD bash /usr/bin/start.sh
-```
-
-- 进行构建
-
-```
-docker build -t youdaoyzbx/ymir-executor:ymir2.0.1-yolov5-cu111-tmi . -f zzz.dockerfile
-```
-
-## ymir后台错误查看
-
-- 在`ymir-workplace/ymir-data/logs`下查看
-
-```
-tail -f -n 200 ymir_controller.log
-```
diff --git a/docs/object_detection/test_det.md b/docs/object_detection/test_det.md
new file mode 100644
index 0000000..7a443f1
--- /dev/null
+++ b/docs/object_detection/test_det.md
@@ -0,0 +1,98 @@
+# 测试Ymir目标检测镜像
+
+## 通过YMIR平台进行测试
+
+用户可以直接通过Ymir平台发起目标检测的训练,推理及挖掘任务,对镜像进行测试。
+
+!!! 注意
+    YMIR平台发起的任务在顺利结束时,会清理相应的目录,因此在测试时,请确保相应目录存在。
+
+### 导入待测镜像
+
+- 假设用户已经制作好镜像 **demo/det:tmi**, 它支持训练、推理及挖掘
+
+- 假设用户具有管理员权限,按照[新增镜像](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%96%B0%E5%A2%9E%E9%95%9C%E5%83%8F) 将**demo/det:tmi** 添加到 **我的镜像** 中。
+
+### 导入待测数据集
+
+- 下载示例目标检测数据集 [ymir2.0.0_dog_train.zip](https://github.com/modelai/ymir-executor-fork/releases/download/dataset-ymir2.0.0/ymir2.0.0_dog_train.zip) [ymir2.0.0_dog_val.zip](https://github.com/modelai/ymir-executor-fork/releases/download/dataset-ymir2.0.0/ymir2.0.0_dog_val.zip)
+
+- 建立包含对应标签的项目, `训练类别` 中添加对应标签 `dog`
+
+- 按照[添加数据集](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%B7%BB%E5%8A%A0%E6%95%B0%E6%8D%AE%E9%9B%86)导入示例目标检测数据集
+
+### 发起待测任务
+
+发起待测的训练、推理或挖掘任务后,等待其结束或出错。
+
+### 获取任务id
+
+登录服务器后台,进入YMIR部署的目录 `ymir-workplace`
+
+- 对于训练任务:`cd sandbox/work_dir/TaskTypeTraining`
+
+- 对于挖掘或推理任务: `cd sandbox/work_dir/TaskTypeMining`
+
+- 对于单张图片测试任务: `cd sandbox/work_dir/TaskTypeInfer`
+
+- 列举当前所有的任务,按任务时间找到对应任务id, 此处假设为最新的 **t00000020000023a473e1673591617**
+
+!!! 注意
+    对于训练任务, 可通过tensorboard链接获得对应任务id。
+
+```
+> ls -lt .
+
+drwxr-xr-x 4 root root 45 Jan 13 14:33 t00000020000023a473e1673591617
+drwxr-xr-x 4 root root 45 Jan 13 14:19 t00000020000025d55ff1673590756
+drwxr-xr-x 4 root root 45 Jan 13 14:13 t00000020000028b0cce1673590425
+drwxr-xr-x 4 root root 45 Jan 10 14:09 t00000020000018429301673330944
+drwxr-xr-x 4 root root 45 Jan  9 18:21 t000000200000210e0811673259669
+drwxr-xr-x 4 root root 45 Jan  9 18:07 t00000020000029e02f61673258829
+```
+
+### 通过 docker 进行交互式调试
+
+- 进入任务id对应的工作目录 `cd t00000020000023a473e1673591617/sub_task/t00000020000023a473e1673591617`
+
+- 列举当前目录可以看到 `in` 和 `out` 目录
+
+- 进行交互式调试
+
+    - 假设 `ymir-workplace` 存放在 **/data/ymir/ymir-workplace**, 需要将 `ymir-workplace` 目录也挂载到镜像中相同位置,以确保所有软链接均有效。
+
+    - 假设启动程序为 **/usr/bin/start.sh**
+
+```
+docker run -it --rm --gpus all --ipc host -v $PWD/in:/in -v $PWD/out:/out -v /data:/data demo/det:tmi bash
+
+bash /usr/bin/start.sh
+```
+
+- 假设用户开发镜像的代码存放在 **/home/modelai/code**, 为方便测试, 可以将 **/home/modelai/code** 也挂载到镜像中进行测试。
+
+    - 假设实际启动程序为 **start.py**
+
+```
+docker run -it --rm --gpus all --ipc host -v $PWD/in:/in -v $PWD/out:/out -v /data:/data -v /home/modelai/code:/home/modelai/code demo/det:tmi bash
+
+cd /home/modelai/code
+python start.py
+```
+
+### 测试通过后
+
+- 通过 `docker build` 重新构建镜像, 如果修改了超参数,需要在Ymir平台删除旧镜像并重新添加,使更新的超参数生效。如果仅仅修改了代码,不需要重新添加即可使用本地的最新镜像。
+
+
+## 💫 YMIR后台错误查看
+
+- 如镜像正确运行,但输出格式不符合YMIR后台要求,或其他错误,可在 `ymir-workplace/ymir-data/logs` 下查看
+
+```
+tail -f -n 200 ymir_controller.log
+```
+
+## 💫 通过 ymir-executor-verifier 进行测试
+
+[ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) 面向企业用户,目的是对大量镜像进行自动化测试,以保障镜像的质量。
diff --git a/docs/official-docker-image.md b/docs/official-docker-image.md
index 22e3714..a737618 100644
--- a/docs/official-docker-image.md
+++ b/docs/official-docker-image.md
@@ -91,7 +91,7 @@ youdaoyzbx/ymir-executor:ymir1.3.0-mmdet-cu111-tmi
 
 - [yolov5](https://github.com/modelai/ymir-executor-fork#det-yolov5-tmi)
 
-  - [change log](./det-yolov5-tmi/README.md)
+  - [change log](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-yolov5-tmi/ymir/README.md)
 
 ```
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-yolov5-cu111-tmi
 ```
 
 - [mmdetection](https://github.com/modelai/ymir-executor-fork#det-mmdetection-tmi)
 
-  - [change log](./det-mmdetection-tmi/README.md)
+  - [change log](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-mmdetection-tmi/README.md)
 
 ```
 docker pull youdaoyzbx/ymir-executor:ymir1.1.0-mmdet-cu111-tmi
 ```
diff --git a/docs/overview/dataset-format.md b/docs/overview/dataset-format.md
index 70833d5..7c0ee28 100644
--- a/docs/overview/dataset-format.md
+++ b/docs/overview/dataset-format.md
@@ -50,6 +50,8 @@ ymir平台导出的数据集格式,其中图片格式固定为 'raw', 而标
 其中 `class_id, xmin, ymin, xmax, ymax` 均为整数,而标注质量`ann_quality`为浮点数,默认为-1.0, 标注框旋转角度`bbox_angle`为浮点数,单位为[RAD](https://baike.baidu.com/item/RAD/2262445)
 ```
 0, 242, 61, 424, 249, -1.0, 0.0
+1, 211, 147, 325, 255, -1.0, 0.0
+1, 122, 7, 372, 375, -1.0, 0.0
 ```
 
@@ -65,11 +67,56 @@
 /in/assets/56/56f3af57d381154d377ad92a99b53e4d12de6456.jpg /in/annotations/56/56f3af57d381154d377ad92a99b53e4d12de6456.xml
 ```
+- 示例xml文件
+```
+<annotation>
+  <folder>VOC2012</folder>
+  <filename>2008_000026.jpg</filename>
+  <source>
+    <database>The VOC2008 Database</database>
+    <annotation>PASCAL VOC2008</annotation>
+    <image>flickr</image>
+  </source>
+  <size>
+    <width>500</width>
+    <height>375</height>
+    <depth>3</depth>
+  </size>
+  <segmented>0</segmented>
+  <object>
+    <name>person</name>
+    <pose>Frontal</pose>
+    <truncated>1</truncated>
+    <occluded>1</occluded>
+    <bndbox>
+      <xmin>122</xmin>
+      <ymin>7</ymin>
+      <xmax>372</xmax>
+      <ymax>375</ymax>
+    </bndbox>
+    <difficult>0</difficult>
+  </object>
+  <object>
+    <name>dog</name>
+    <pose>Unspecified</pose>
+    <truncated>0</truncated>
+    <occluded>1</occluded>
+    <bndbox>
+      <xmin>211</xmin>
+      <ymin>147</ymin>
+      <xmax>325</xmax>
+      <ymax>255</ymax>
+    </bndbox>
+    <difficult>0</difficult>
+  </object>
+</annotation>
+```
+
 
 ## seg-coco:raw 语义与实例分割的标注格式
 
-- export_format = seg-coco:raw 时的训练/验证集索引文件
+- 
`export_format = seg-coco:raw` 时的训练/验证集索引文件 !!! 注意 此时所有图像文件共享一个标注文件 @@ -81,3 +128,90 @@ ymir平台导出的数据集格式,其中图片格式固定为 'raw', 而标 /in/assets/95/e47ac9932cdf6fb08681f6b0007cbdeefdf49c95.jpg /in/annotations/coco-annotations.json /in/assets/56/56f3af57d381154d377ad92a99b53e4d12de6456.jpg /in/annotations/coco-annotations.json ``` + +- 示例json文件 + +标注mask采用 `rle` 编码。 + +```json +{ + "images": [ + { + "file_name": "fake1.jpg", + "height": 800, + "width": 800, + "id": 0 + }, + { + "file_name": "fake2.jpg", + "height": 800, + "width": 800, + "id": 1 + }, + { + "file_name": "fake3.jpg", + "height": 800, + "width": 800, + "id": 2 + } + ], + "annotations": [ + { + "bbox": [ + 0, + 0, + 20, + 20 + ], + "segmentation": {"counts": ''}, + "area": 400.00, + "score": 1.0, + "category_id": 1, + "id": 1, + "image_id": 0 + }, + { + "bbox": [ + 0, + 0, + 20, + 20 + ], + "segmentation": {"counts": ''}, + "area": 400.00, + "score": 1.0, + "category_id": 2, + "id": 2, + "image_id": 0 + }, + { + "bbox": [ + 0, + 0, + 20, + 20 + ], + "segmentation": {"counts": ''}, + "area": 400.00, + "score": 1.0, + "category_id": 1, + "id": 3, + "image_id": 1 + } + ], + "categories": [ + { + "id": 1, + "name": "bus", + "supercategory": "none" + }, + { + "id": 2, + "name": "car", + "supercategory": "none" + } + ], + "licenses": [], + "info": null +} +``` diff --git a/docs/segmentation.md b/docs/segmentation.md deleted file mode 100644 index c33df9f..0000000 --- a/docs/segmentation.md +++ /dev/null @@ -1,138 +0,0 @@ -# segmentation - -- update date: 2022/11/14 - -## semantic segmentation: 语义分割 - -### docker images: docker 镜像 - -- `youdaoyzbx/ymir-executor:ymir2.0.0-mmseg-cu111-tmi` - -### hyper-parameters: 超参数 - -- training: 训练 - - `export_format`: `seg-mask:raw` - -### convert dataset format: 转换数据集格式 - -``` -from ymir_exc.dataset_convert import convert_ymir_to_mmseg -from ymir_exc.util import get_merged_config - -ymir_cfg = get_merged_config() -new_ann_dict = convert_ymir_to_mmseg(ymir_cfg) -``` - -### read: 输入格式 - -``` -in -├── annotations [19 entries exceeds filelimit, not opening dir] -├── assets -> /xxx/ymir-workplace/sandbox/0001/asset_cache -├── config.yaml -├── env.yaml -├── idx-assets.tsv -├── idx-gt.tsv -├── idx-pred.tsv -├── models -├── predictions [18 entries exceeds filelimit, not opening dir] -├── pred-test-index.tsv -├── pred-train-index.tsv -├── pred-val-index.tsv -├── test-index.tsv -├── train-index.tsv -└── val-index.tsv -``` - -## in/annotations -``` -ls /in/annotations -08 15 19 32 35 3b 59 6a 72 77 85 a4 a6 cd d1 e0 e1 f0 labelmap.txt -``` - -## in/annotations/labelmap.txt - -- `class_name:R,G,B::`: - - class_name=bg, RGB=(0, 0, 0) - - class_name=fg, RGB=(1, 1, 1) - -- `cat in/annotations/labelmap.txt` -``` -bg:0,0,0:: -fg:1,1,1:: -``` - -## in/env.yaml -``` -input: [2/1804] - annotations_dir: /in/annotations - assets_dir: /in/assets - candidate_index_file: '' - config_file: /in/config.yaml - models_dir: /in/models - root_dir: /in - training_index_file: /in/train-index.tsv - val_index_file: /in/val-index.tsv -output: - executor_log_file: /out/ymir-executor-out.log - infer_result_file: /out/infer-result.json - mining_result_file: /out/result.tsv - models_dir: /out/models - monitor_file: /out/monitor.txt - root_dir: /out - tensorboard_dir: /out/tensorboard - training_result_file: /out/models/result.yaml -protocol_version: 1.1.0 -run_infer: false -run_mining: false -run_training: true -task_id: t00000010000059a17ce1668392602 -``` - -## in/train-index.tsv -``` -/in/assets/32/6371cbb7e0a2c356cb17e17ca467c7f892ccc232.png 
/in/annotations/32/6371cbb7e0a2c356cb17e17ca467c7f892ccc232.png -/in/assets/32/562cfd8c96bba98568673d59614d2578258f1e32.png /in/annotations/32/562cfd8c96bba98568673d59614d2578258f1e32.png -/in/assets/59/f72430463f59d0299c3258e01fc9ad2c5671b359.png /in/annotations/59/f72430463f59d0299c3258e01fc9ad2c5671b359.png -``` - -### write: 输出格式 -``` -out -├── models [17 entries exceeds filelimit, not opening dir] -├── monitor.txt -├── tensorboard -> /xxx/ymir-workplace/ymir-tensorboard-logs/0001/t00000010000059a17ce1668392602 -└── ymir-executor-out.log -``` - -- `ls /out/models` -``` -20221114_022352.log iter_1000.pth iter_1800.pth iter_600.pth ymir-info.yaml -20221114_022352.log.json iter_1200.pth iter_2000.pth iter_800.pth -best_mIoU_iter_1200.pth iter_1400.pth iter_200.pth latest.pth -fast_scnn_lr0.12_8x4_160k_cityscapes.py iter_1600.pth iter_400.pth result.yaml -``` - -- `cat /out/models/result.yaml` -``` -best_stage_name: best -map: 0.632 -model_stages: - best: - files: - - fast_scnn_lr0.12_8x4_160k_cityscapes.py - - best_mIoU_iter_1200.pth - mAP: 0.632 - stage_name: best - timestamp: 1668393850 - last: - files: - - fast_scnn_lr0.12_8x4_160k_cityscapes.py - - latest.pth - mAP: 0.5421 - stage_name: last - timestamp: 1668393874 -``` - -## instance segmentation: 实例分割 -todo: 开发中 diff --git a/docs/semantic_segmentation/test_semantic_seg.md b/docs/semantic_segmentation/test_semantic_seg.md index 584f064..aca212b 100644 --- a/docs/semantic_segmentation/test_semantic_seg.md +++ b/docs/semantic_segmentation/test_semantic_seg.md @@ -4,6 +4,9 @@ 用户可以直接通过Ymir平台发起语义分割的训练,推理及挖掘任务,对镜像进行测试。 +!!! 注意 + YMIR平台发起的任务在顺利结束时,会清理相应的目录,因此在测试时,请确保相应目录存在。 + ### 导入待测镜像 - 假设用户已经制作好镜像 **demo/semantic_seg:tmi**, 它支持训练、推理及挖掘 @@ -30,7 +33,13 @@ - 对于挖掘或推理任务: `cd sandbox/work_dir/TaskTypeMining` +- 对于单张图片测试任务: `cd sandbox/work_dir/TaskTypeInfer` + - 列举当前所有的任务,按任务时间找到对应任务id, 此处假设为最新的 **t00000020000023a473e1673591617** + +!!! 注意 + 对于训练任务, 可通过tensorboard链接获得对应任务id。 + ``` > ls -lt . 
@@ -75,6 +84,14 @@ python start.py - 通过 `docker build` 重新构建镜像, 如果修改了超参数,需要在Ymir平台删除旧镜像并重新添加,使更新的超参数生效。如果仅仅修改了代码,不需要重新添加即可使用本地的最新镜像。 -## 通过 ymir-executor-verifier 进行测试 +## 💫 YMIR后台错误查看 + +- 如镜像正确运行,但输出格式不符合YMIR后台要求,或其他错误,可在 `ymir-workplace/ymir-data/logs` 下查看 + +``` +tail -f -n 200 ymir_controller.log +``` + +## 💫 通过 ymir-executor-verifier 进行测试 [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) 面向企业用户,目的是对大量镜像进行自动化测试,以保障镜像的质量。 diff --git a/docs/ymir-dataset-format.md b/docs/ymir-dataset-format.md deleted file mode 100644 index b7da634..0000000 --- a/docs/ymir-dataset-format.md +++ /dev/null @@ -1,128 +0,0 @@ -# ymir 镜像数据标注格式 - -本文介绍在算法镜像中,ymir的数据标注格式。 - -| export_format | 算法类型 | 格式说明 | -| - | - | - | -| ark:raw 或 det-ark:raw | 目标检测 | 标注文件为txt | -| voc:raw 或 det-voc:raw | 目标检测 | 标注文件为xml,目标检测默认格式 | -| seg-coco:raw | 图像分割 | 标注文件为json,图像分割默认格式 | - -## 设置修改 - -- 对于训练镜像,用户可以通过设置 `/img-man/training-template.yaml` 中的 `export_format` 字段来控制镜像需要使用数据格式。 - -- 对于推理或挖掘镜像,由于不需要用到标注文件,因此不需要设置数据标注格式 - -- 目录结构 - -``` -/in -├── annotations # 标注文件所在目录 -├── assets # 图像所在目录 -├── config.yaml # 超参数配置文件 -├── env.yaml # ymir环境配置文件 -├── models # 预训练模型权重文件所在目录 -├── train-index.tsv # 训练集索引文件 -└── val-index.tsv # 验证集索引文件 -``` - -- 索引文件格式 - -每行为 `图像绝对路径` + `\t` + `标注文件绝对路径`,用户按行解析索引文件,即可获得所有的标注图像与标注文件。 - -## det-ark:raw - -- 索引文件示例 -``` -/in/assets/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.jpeg /in/annotations/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.txt -/in/assets/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.jpeg /in/annotations/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.txt -/in/assets/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.jpeg /in/annotations/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.txt -``` - -- 标注文件示例 - -每行为 `class_id` + `xmin` + `ymin` + `xmax` + `ymax`,通过 `,` 进行分隔。 - -``` -0, 122, 7, 372, 375 -1, 211, 147, 325, 255 -``` - -## det-voc:raw - -- 索引文件示例 -``` -/in/assets/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.jpeg /in/annotations/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.xml -/in/assets/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.jpeg /in/annotations/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.xml -/in/assets/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.jpeg /in/annotations/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.xml -``` - -- 标注文件示例 - -参考voc xml 格式 - -``` - - VOC2012 - 2008_000026.jpg - - The VOC2008 Database - PASCAL VOC2008 - flickr - - - 500 - 375 - 3 - - 0 - - person - Frontal - 1 - 1 - - 122 - 7 - 372 - 375 - - 0 - - - dog - Unspecified - 0 - 1 - - 211 - 147 - 325 - 255 - - 0 - - - -``` - -## seg-coco:raw - -- 索引文件示例 - -其中所有图像文件都对应同一个标注文件 - -``` -/in/assets/train/26681097a3e1194777c8dc7bb946e70d0cbbcec8.jpeg /in/annotations/coco-annotations.json -/in/assets/train/6cf24e81164571c5e5f5f10dc9f51cde13fabb05.jpeg /in/annotations/coco-annotations.json -/in/assets/train/07b3fb8bd1e36b5edb509b822c1ad86b5863630f.jpeg /in/annotations/coco-annotations.json -``` - -- 标注文件示例 - -参考coco格式 - -``` - -``` diff --git a/docs/ymir-dataset-zh-CN.md b/docs/ymir-dataset-zh-CN.md deleted file mode 100644 index e412e1f..0000000 --- a/docs/ymir-dataset-zh-CN.md +++ /dev/null @@ -1,141 +0,0 @@ -# ymir-executor 使用说明 - -更新日期: 2022-11-01 - -本文档面向使用或定制[ymir-executor](https://github.com/IndustryEssentials/ymir-executor)的用户 - - -## 外部数据集导入ymir-gui系统 - -- `<1G` 的数据集可以直接`本地导入`,将本地数据集压缩包上传到ymir系统中,数据集具体格式与voc类似,参考[ymir-cmd 准备外部数据](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#421-%E5%87%86%E5%A4%87%E5%A4%96%E9%83%A8%E6%95%B0%E6%8D%AE) - - 
[sample导入数据集](https://github.com/yzbx/ymir-executor-fork/releases/download/dataset/import_sample_dataset.zip) - - -- `>=1G` 的数据集可以通过`路径导入`,先将数据集复制到ymir工作目录下的子目录`ymir-sharing`,再输入相对路径导入 - - -## ymir系统与ymir-executor镜像的数据传输接口 - -- 参考[ymir 与功能性 docker container 数据传输接口](https://github.com/IndustryEssentials/ymir/blob/master/docs/ymir-cmd-container.md) - - - ymir会将`/in`与`/out`目录挂载到镜像中 - - - 镜像中需要自带`/img-man`目录,辅助ymir系统对镜像类型进行识别,并对超参数页面进行配置 - - - 镜像默认以`bash /usr/bin/start.sh`进行启动 - - - **注意所有 .tsv 和 .yaml 文件中出现的路径都是绝对路径** - -- [sample /in /out](https://github.com/yzbx/ymir-executor-fork/releases/download/dataset/sample_docker_input.zip) - - ![](images/sample_docker_input.png) - -- [sample /img-man](https://github.com/IndustryEssentials/ymir/tree/master/docker_executor/sample_executor/app) - - - 注意所有的`xxx-template.yaml`只能是一级`key:value`文件 - -### 索引文件 train-index.tsv / val-index.tsv / candidate-index.tsv - -- 每行由`图像的绝对路径` + `制表符` + `标注的绝对路径`构成 - -``` -{image_abs_path 1}\t{annotation_abs_path 1} -{image_abs_path 2}\t{annotation_abs_path 2} -... -``` - -- 注意 `candidate-index.tsv` 中只有 `图像的绝对路径` - -- 图像为常见的jpg, png格式 - -- 默认标注为`txt`格式,其中`class_id, xmin, ymin, xmax, ymax`均为整数, 所有标注格式介绍见[ymir输入镜像的标注格式](./docs/ymir-dataset-zh-CN.md#ymir输入镜像的标注格式) - -``` -class_id, xmin, ymin, xmax, ymax, bbox_quality -``` - - -### 超参数配置文件 config.yaml - -用户可以在超参数页面看到`xxx-template.yaml`的信息,而`config.yaml` 中的信息,是用户更改过后的。 - -- 对于训练任务,`config.yaml`提供training-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 - -- 对于挖掘任务,`config.yaml`提供mining-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 - -- 对于推理任务,`config.yaml`提供infer-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 - -``` -class_names: # ymir默认配置 -- bowl -- cat -- bottle -- cup -- spoon -gpu_id: '0' # ymir默认配置 -pretrained_model_params: [] # ymir训练时可选默认配置 -model_params_path: [] # ymir推理/挖掘时默认配置 -task_id: t0000001000002ebb7f11653630774 # ymir默认配置 -img_size: 640 # 用户自定义配置 -model: yolov5n # 用户自定义配置 -batch_size: 16 # 用户自定义配置 -``` - -### ymir路径配置文件 env.yaml - -存放一些路径信息,以及当前进行的任务信息 - -- 是否进行训练任务: `run_training: true|false` - -- 是否进行推理任务:`run_infer: true|false` - -- 是否进行挖掘任务: `run_mining: true|false` - -``` -input: - annotations_dir: /in/annotations # 标注文件存放目录 - assets_dir: /in/assets # 图像文件存放目录 - candidate_index_file: '' # 挖掘索引文件 - config_file: /in/config.yaml # 超参配置文件 - models_dir: /in/models # 预训练模型存放目录 - root_dir: /in # 输入根目录 - training_index_file: /in/train-index.tsv # 训练索引文件 - val_index_file: /in/val-index.tsv # 验证索引文件 -output: - infer_result_file: /out/infer-result.json # 推理结果文件 - mining_result_file: /out/result.tsv # 挖掘结果文件 - models_dir: /out/models # 训练任务模型权重与信息等存放目录 - monitor_file: /out/monitor.txt # 任务进度文件 - root_dir: /out # 输出根目录 - tensorboard_dir: /out/tensorboard # tensorboard结果文件目录 - training_result_file: /out/models/result.yaml # 训练任务结果文件 -run_infer: false -run_mining: false -run_training: true -task_id: t0000001000002ebb7f11653630774 # 任务id -``` - -## ymir输入镜像的标注格式 - -常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改,其中检测格式 ["det-ark", "det-voc"]也可简写为 ["ark", "voc"], 从而兼容ymir1.1.0 。 - -``` -image format: ['raw', 'lmdb'] -annotation format: ["ark", "voc", "det-ark", "det-voc", "seg-coco"] -``` - -### 默认数据格式 - -- ymir1.1.0 默认数据导出格式 `export_format=ark:raw`, 标注文件为`xxx.txt` - -- ymir2.0.0+ 默认检测数据导出格式 `export_format=det-voc:raw`, 标注文件为`xxx.xml` - -- ymir2.0.0+ 默认分割数据导出格式 `export_format=seg-coco:raw`, 标注文件为`xxx.json` - -### voc 数据格式 - -- `export_format=voc:raw` 或 `export_format=det-voc:raw` 标注文件为`xxx.xml`, 可以包含更多自定义的图像信息 - -- 
`export_format=ark:raw` 或 `export_format=det-ark:raw`, 标注文件为`xxx.txt` - -- `export_format=seg-coco:raw`, 标注文件为`xxx.json` diff --git a/docs/ymir-executor-version.md b/docs/ymir-executor-version.md index f1dd796..1135b33 100644 --- a/docs/ymir-executor-version.md +++ b/docs/ymir-executor-version.md @@ -6,11 +6,11 @@ - 训练镜像可以获得系统的ymir接口版本,方便镜像兼容 -- 预训练模型文件在ymir1.1.0时放在/in/models目录下, +- 预训练模型文件在ymir1.1.0时放在/in/models目录下,ymir2.0.0时放在 /in/models/目录下 ## 辅助库 -- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) 采用ymir1.3.0分支 +- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) 采用ymir2.0.0分支 - [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) 镜像检查工具 diff --git a/docs/cn/docker_images/det-detectron2-tmi.md b/docs/ymir_executor/det-detectron2-tmi.md similarity index 100% rename from docs/cn/docker_images/det-detectron2-tmi.md rename to docs/ymir_executor/det-detectron2-tmi.md diff --git a/docs/cn/docker_images/det-mmdet-tmi.md b/docs/ymir_executor/det-mmdet-tmi.md similarity index 100% rename from docs/cn/docker_images/det-mmdet-tmi.md rename to docs/ymir_executor/det-mmdet-tmi.md diff --git a/docs/cn/docker_images/det-nanodet-tmi.md b/docs/ymir_executor/det-nanodet-tmi.md similarity index 100% rename from docs/cn/docker_images/det-nanodet-tmi.md rename to docs/ymir_executor/det-nanodet-tmi.md diff --git a/docs/cn/docker_images/det-vidt-tmi.md b/docs/ymir_executor/det-vidt-tmi.md similarity index 100% rename from docs/cn/docker_images/det-vidt-tmi.md rename to docs/ymir_executor/det-vidt-tmi.md diff --git a/docs/cn/docker_images/det-yolov4-tmi.md b/docs/ymir_executor/det-yolov4-tmi.md similarity index 100% rename from docs/cn/docker_images/det-yolov4-tmi.md rename to docs/ymir_executor/det-yolov4-tmi.md diff --git a/docs/cn/docker_images/det-yolov5-automl-tmi.md b/docs/ymir_executor/det-yolov5-automl-tmi.md similarity index 100% rename from docs/cn/docker_images/det-yolov5-automl-tmi.md rename to docs/ymir_executor/det-yolov5-automl-tmi.md diff --git a/docs/cn/docker_images/det-yolov5-tmi.md b/docs/ymir_executor/det-yolov5-tmi.md similarity index 100% rename from docs/cn/docker_images/det-yolov5-tmi.md rename to docs/ymir_executor/det-yolov5-tmi.md diff --git a/docs/cn/docker_images/det-yolov7-tmi.md b/docs/ymir_executor/det-yolov7-tmi.md similarity index 100% rename from docs/cn/docker_images/det-yolov7-tmi.md rename to docs/ymir_executor/det-yolov7-tmi.md diff --git a/mkdocs.yml b/mkdocs.yml index e7765c9..ee69e70 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -10,8 +10,9 @@ plugins: python: options: docstring_style: numpy - watch: - - seg-semantic-demo-tmi.app.start + # watch: + # - seg-semantic-demo-tmi.app.start + - include_dir_to_nav markdown_extensions: - markdown_include.include: base_path: . 
@@ -19,5 +20,9 @@ markdown_extensions: - toc: permalink: "#" # - sane_lists -# nav: -# - Home: index.md +nav: + - Home: index.md + - 基本概念: overview + - 目标检测: object_detection + - 语义分割: semantic_segmentation + - Ymir镜像说明文档: ymir_executor From 9b22f01785bea25fc843ecaa8cbc664f97671329 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Sat, 28 Jan 2023 15:54:18 +0800 Subject: [PATCH 188/204] update requirements.txt --- docs/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index f6cb652..323332c 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -64,3 +64,5 @@ watchdog==2.1.9 # via mkdocs zipp==3.8.0 # via importlib-metadata +mkdocs-include-dir-to-nav==1.2.0 + # via mkdocs From 7624b839cc320080a49f820bcf8d7f29f1b82157 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 1 Feb 2023 18:35:38 +0800 Subject: [PATCH 189/204] update doc --- docs/algorithms/mmdet.md | 39 ++++++++++++++++ docs/algorithms/mmseg.md | 0 docs/algorithms/mmyolo.md | 94 +++++++++++++++++++++++++++++++++++++++ docs/algorithms/yolov5.md | 0 docs/algorithms/yolov8.md | 0 5 files changed, 133 insertions(+) create mode 100644 docs/algorithms/mmdet.md create mode 100644 docs/algorithms/mmseg.md create mode 100644 docs/algorithms/mmyolo.md create mode 100644 docs/algorithms/yolov5.md create mode 100644 docs/algorithms/yolov8.md diff --git a/docs/algorithms/mmdet.md b/docs/algorithms/mmdet.md new file mode 100644 index 0000000..21e232d --- /dev/null +++ b/docs/algorithms/mmdet.md @@ -0,0 +1,39 @@ +# mmdetection + +此文档采用 `mmdetection v3.x` 架构,阅读此文档前,建议先了解[mmengine](https://mmengine.readthedocs.io/zh_CN/latest/get_started/introduction.html). + +- [mmdetection v3.x](https://github.com/open-mmlab/mmdetection/tree/3.x) + +- [ymir-mmdetection](https://github.com/modelai/ymir-mmdetection) + +## 配置镜像环境 + +## 提供超参数模板文件与镜像配置文件 + +- [img-man/*-template.yaml](https://github.com/modelai/ymir-mmdetection/tree/ymir/ymir/img-man) + +## 提供默认启动脚本 + +- [ymir/start.py](https://github.com/modelai/ymir-mmyolo/tree/ymir/ymir/start.py) + +- Dockerfile +``` +RUN echo "python /app/ymir/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh +CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh +``` + +## 实现基本功能 + +### 训练 + +### 推理 + +### 挖掘 + +## 制作镜像 det/mmdet:tmi + +- [ymir/Dockerfile](https://github.com/modelai/ymir-mmdetection/tree/ymir/ymir/Dockerfile) + +``` +docker build -t det/mmyolo:tmi -f ymir/Dockerfile . 
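+
+# 构建完成后可做一次本地快速检查(仅为示例命令, 非文档原有步骤, 镜像名以上面 docker build 的结果为准):
+# docker run --rm det/mmyolo:tmi ls /img-man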
+``` diff --git a/docs/algorithms/mmseg.md b/docs/algorithms/mmseg.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/algorithms/mmyolo.md b/docs/algorithms/mmyolo.md new file mode 100644 index 0000000..948538e --- /dev/null +++ b/docs/algorithms/mmyolo.md @@ -0,0 +1,94 @@ +# mmyolo + +阅读此文档前,建议先了解[mmdet](./mmdet.md) + +- [mmyolo](https://github.com/open-mmlab/mmyolo) + +- [ymir-mmyolo](https://github.com/modelai/ymir-mmyolo) + +## 配置镜像环境 + +参考 [mmyolo#installation](https://github.com/modelai/ymir-mmyolo#%EF%B8%8F-installation-) + +- [ymir/Dockerfile](https://github.com/modelai/ymir-mmyolo/tree/ymir/ymir/Dockerfile) + +## 提供超参数模板文件 + +- [img-man/*-template.yaml](https://github.com/modelai/ymir-mmyolo/tree/ymir/ymir/img-man) + +## 提供镜像说明文件 + +- [img-man/manifest.yaml](https://github.com/modelai/ymir-mmyolo/tree/ymir/ymir/img-man/manifest.yaml) + +## 提供默认启动脚本 + +- [ymir/start.py](https://github.com/modelai/ymir-mmyolo/tree/ymir/ymir/start.py) + +- Dockerfile +``` +RUN echo "python /app/ymir/start.py" > /usr/bin/start.sh # 生成启动脚本 /usr/bin/start.sh +CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/start.sh +``` + +## 实现基本功能 + +完整代码变动参考[ymir-mmyolo/pull/1](https://github.com/modelai/ymir-mmyolo/pull/1/files) + +### 训练 + +1. 启动镜像时调用 `bash /usr/bin/start.sh` + +2. `start.sh` 调用 `python3 ymir/start.py` + +3. `start.py` 调用 `python3 ymir/ymir_training.py` + +4. `ymir_training.py` 调用 `bash tools/dist_train.sh ...` + + - `ymir_training.py` 调用 `convert_ymir_to_coco()` 实现数据集格式转换 + + - `ymir_training.py` 获取配置文件(config_file)、GPU数量(num_gpus)、工作目录(work_dir), 并拼接到调用命令中 + + ``` + cmd = f"bash ./tools/dist_train.sh {config_file} {num_gpus} --work-dir {work_dir}" + ``` + - 在训练结束后, 保存 `max_keep_checkpoints` 份权重文件 + +5. `dist_train.sh` 调用 `python3 tools/train.py ...` + + - `train.py` 中调用 `modify_mmengine_config()` 加载ymir平台超参数、自动配置预训练模型、添加tensorboard功能、添加ymir进度监控hook等。 + +### 推理 + +1. 启动镜像时调用 `bash /usr/bin/start.sh` + +2. `start.sh` 调用 `python3 ymir/start.py` + +3. `start.py` 调用 `python3 ymir/ymir_infer.py` + + - 调用 `init_detector()` 与 `inference_detector()` 获取推理结果 + + - 调用 `mmdet_result_to_ymir()` 将mmdet推理结果转换为ymir格式 + + - 调用 `rw.write_infer_result()` 保存推理结果 + +### 挖掘 + +1. 启动镜像时调用 `bash /usr/bin/start.sh` + +2. `start.sh` 调用 `python3 ymir/start.py` + +3. `start.py` 调用 `python3 ymir/ymir_mining.py` + + - 调用 `init_detector()` 与 `inference_detector()` 获取推理结果 + + - 调用 `compute_score()` 计算挖掘分数 + + - 调用 `rw.write_mining_result()` 保存挖掘结果 + +## 制作镜像 det/mmyolo:tmi + +- [ymir/Dockerfile](https://github.com/modelai/ymir-mmyolo/tree/ymir/ymir/Dockerfile) + +``` +docker build -t det/mmyolo:tmi -f ymir/Dockerfile . 
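+
+# 以下为构建后本地冒烟测试的参考命令(仅为示例, 挂载目录 $PWD/in 与 $PWD/out 为假设, 用法与 test_det.md 中一致):
+# docker run -it --rm --gpus all -v $PWD/in:/in -v $PWD/out:/out det/mmyolo:tmi bash /usr/bin/start.sh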
+``` diff --git a/docs/algorithms/yolov5.md b/docs/algorithms/yolov5.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/algorithms/yolov8.md b/docs/algorithms/yolov8.md new file mode 100644 index 0000000..e69de29 From 7bd54f9401aa1783b32ea0ee336cfafcbe71891a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 1 Feb 2023 18:38:37 +0800 Subject: [PATCH 190/204] udpate readme --- README.md | 2 +- README_zh-CN.md | 2 +- docs/object_detection/simple_det_training.md | 2 +- docs/overview/ymir-executor.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 994159b..488e143 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# ymir-executor documentation [English](./README.MD) | [简体中文](./README_zh-CN.MD) +# ymir-executor documentation [English](./README.md) | [简体中文](./README_zh-CN.md) - [ymir](https://github.com/IndustryEssentials/ymir) diff --git a/README_zh-CN.md b/README_zh-CN.md index 35f1316..8d8f08b 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -1,4 +1,4 @@ -# ymir-executor 使用文档 [English](./README.MD) | [简体中文](./README_zh-CN.MD) +# ymir-executor 使用文档 [English](./README.md) | [简体中文](./README_zh-CN.md) - [ymir](https://github.com/IndustryEssentials/ymir) diff --git a/docs/object_detection/simple_det_training.md b/docs/object_detection/simple_det_training.md index 321c977..a878711 100644 --- a/docs/object_detection/simple_det_training.md +++ b/docs/object_detection/simple_det_training.md @@ -32,7 +32,7 @@ COPY img-man/*.yaml /img-man/ # 将主机中img-man目录下的所有yaml文件 - [img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi/img-man/manifest.yaml) ``` -# 3 for object detection +# 2 for object detection "object_type": 2 ``` diff --git a/docs/overview/ymir-executor.md b/docs/overview/ymir-executor.md index f34d6af..1f5bfc3 100644 --- a/docs/overview/ymir-executor.md +++ b/docs/overview/ymir-executor.md @@ -42,7 +42,7 @@ docker run hello-world - [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installation-guide) !!! 
注意
-    先按照上述链接中的前提条件安装好 **NVIDIA Driver**
+    先按照上述链接中的前提条件安装好 **NVIDIA Driver >= 510.47.03**, 以支持 `cuda11.6+`
 
 ```
 # 添加软件源
From 4317f16b6f7703e37b3535cee5de29b4b6ac0e80 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 3 Feb 2023 11:21:29 +0800
Subject: [PATCH 191/204] add instance seg demo

---
 docs/algorithms/mmdet.md                      |  39 ++-
 docs/algorithms/mmyolo.md                     |   6 +-
 docs/object_detection/test_det.md             |   4 +
 docs/overview/ymir-executor.md                |  22 ++
 .../test_semantic_seg.md                      |   4 +
 seg-instance-demo-tmi/Dockerfile              |  42 +++
 seg-instance-demo-tmi/README.MD               |   3 +
 .../app/pycococreatortools.py                 | 143 +++++++++++
 seg-instance-demo-tmi/app/result_to_coco.py   | 105 ++++++++
 seg-instance-demo-tmi/app/start.py            | 239 ++++++++++++++++++
 seg-instance-demo-tmi/fast.Dockerfile         |  20 ++
 .../img-man/infer-template.yaml               |  12 +
 seg-instance-demo-tmi/img-man/manifest.yaml   |   3 +
 .../img-man/mining-template.yaml              |  12 +
 .../img-man/training-template.yaml            |  18 ++
 seg-instance-demo-tmi/requirements.txt        |  11 +
 16 files changed, 680 insertions(+), 5 deletions(-)
 create mode 100644 seg-instance-demo-tmi/Dockerfile
 create mode 100644 seg-instance-demo-tmi/README.MD
 create mode 100644 seg-instance-demo-tmi/app/pycococreatortools.py
 create mode 100644 seg-instance-demo-tmi/app/result_to_coco.py
 create mode 100644 seg-instance-demo-tmi/app/start.py
 create mode 100644 seg-instance-demo-tmi/fast.Dockerfile
 create mode 100644 seg-instance-demo-tmi/img-man/infer-template.yaml
 create mode 100644 seg-instance-demo-tmi/img-man/manifest.yaml
 create mode 100644 seg-instance-demo-tmi/img-man/mining-template.yaml
 create mode 100644 seg-instance-demo-tmi/img-man/training-template.yaml
 create mode 100644 seg-instance-demo-tmi/requirements.txt

diff --git a/docs/algorithms/mmdet.md b/docs/algorithms/mmdet.md
index 21e232d..64ee989 100644
--- a/docs/algorithms/mmdet.md
+++ b/docs/algorithms/mmdet.md
@@ -1,4 +1,4 @@
-# mmdetection
+# ymir-mmdetection
 
 此文档采用 `mmdetection v3.x` 架构,阅读此文档前,建议先了解[mmengine](https://mmengine.readthedocs.io/zh_CN/latest/get_started/introduction.html).
@@ -6,6 +6,41 @@ - [ymir-mmdetection](https://github.com/modelai/ymir-mmdetection) +## mmdetection --> ymir-mmdetection + +- mmdetection支持 `coco` 与 `pascal voc` 等多种数据格式。ymir-mmdetection镜像会将ymir平台的检测数据格式 `det-ark:raw` 转换为 `coco`。 + +- mmdetection通过配置文件如 [configs/_base_/datasets/coco_detection.py](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/_base_/datasets/coco_detection.py#L36-L42) 指明数据集的路径。 + +``` +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +train_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) + ) +``` + +- 为加载ymir平台数据集,一种方案是参考[自定义数据集](https://mmdetection.readthedocs.io/en/3.x/user_guides/train.html#train-with-customized-datasets),提供配置文件。但这种方案会固定数据集的类别,不适合ymir平台。 + +- ymir-mmdetection采用另一种方案,在已有配置文件的基础上,直接在内存中进行修改。参考[ymir-mmyolo/ymir/tools/train.py](https://github.com/modelai/ymir-mmyolo/blob/ymir/tools/train.py#L65-L67) + +``` + # 加载已有配置文件如 `configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py` + cfg = Config.fromfile(args.config) + # 获得ymir平台超参数 + ymir_cfg = get_merged_config() + # 直接在内存中修改配置 + modify_mmengine_config(cfg, ymir_cfg) +``` + ## 配置镜像环境 ## 提供超参数模板文件与镜像配置文件 @@ -35,5 +70,5 @@ CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/s - [ymir/Dockerfile](https://github.com/modelai/ymir-mmdetection/tree/ymir/ymir/Dockerfile) ``` -docker build -t det/mmyolo:tmi -f ymir/Dockerfile . +docker build -t det/mmdet:tmi -f ymir/Dockerfile . ``` diff --git a/docs/algorithms/mmyolo.md b/docs/algorithms/mmyolo.md index 948538e..c0fce14 100644 --- a/docs/algorithms/mmyolo.md +++ b/docs/algorithms/mmyolo.md @@ -1,9 +1,11 @@ -# mmyolo +# ymir-mmyolo -阅读此文档前,建议先了解[mmdet](./mmdet.md) +阅读此文档前,建议先阅读[mmdet](./mmdet.md),了解mmyolo代码仓库数据加载,超参数加载与模型训练流程。 - [mmyolo](https://github.com/open-mmlab/mmyolo) +- [mmyolo算法解析](https://mmyolo.readthedocs.io/zh_CN/latest/algorithm_descriptions/index.html) + - [ymir-mmyolo](https://github.com/modelai/ymir-mmyolo) ## 配置镜像环境 diff --git a/docs/object_detection/test_det.md b/docs/object_detection/test_det.md index 7a443f1..8a702c5 100644 --- a/docs/object_detection/test_det.md +++ b/docs/object_detection/test_det.md @@ -64,8 +64,12 @@ drwxr-xr-x 4 root root 45 Jan 9 18:07 t00000020000029e02f61673258829 - 假设启动程序为 **/usr/bin/start.sh** ``` +# --ipc host 表示容器共享主机的所有内存 docker run -it --rm --gpus all --ipc host -v $PWD/in:/in -v $PWD/out:/out -v /data:/data demo/det:tmi bash +# --shm-size 128g 表示容器最多共享主机128G内存 +# docker run -it --rm --gpus all --shm-size 128g -v $PWD/in:/in -v $PWD/out:/out -v /data:/data demo/det:tmi bash + bash /usr/bin/start.sh ``` diff --git a/docs/overview/ymir-executor.md b/docs/overview/ymir-executor.md index 1f5bfc3..d2c9bac 100644 --- a/docs/overview/ymir-executor.md +++ b/docs/overview/ymir-executor.md @@ -134,12 +134,34 @@ hello ymir executor - 基本功能:加载数据集与超参数进行训练,将模型权重,模型精度等结果保存到 **/out** 目录的指定文件。 +``` +# pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir2.1.0" +from ymir_exc import env + +env_config = env.get_current_env() +with open(env_config.output.training_result_file, "w") as f: + yaml.safe_dump(data=training_result, stream=f) +``` + - 写tensorboard日志:可选, ymir平台支持查看训练任务的tensorboard训练日志 ### 推理镜像需要实现的额外功能 - 基本功能:加载数据集与模型权重进行推理,将推理结果保存到 **/out** 目录的指定文件。 +``` +env_config = env.get_current_env() +with open(env_config.output.infer_result_file, "w") as f: + f.write(json.dumps(result)) 
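+
+# 补充示意: 若在循环中逐张推理, 可同时实时上报进度(idx/total 为假设的循环变量):
+# from ymir_exc import monitor
+# monitor.write_monitor_logger(percent=0.2 + 0.8 * idx / total)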
+``` + ### 挖掘镜像需要实现的额外功能 - 基本功能:加载数据集与模型权重进行挖掘,基于主动学习算法获得每张图片的重要程度分数,将分数保存到 **/out** 目录的指定文件。 + +``` +env_config = env.get_current_env() +with open(env_config.output.mining_result_file, "w") as f: + for asset_id, score in sorted_mining_result: + f.write(f"{asset_id}\t{score}\n") +``` diff --git a/docs/semantic_segmentation/test_semantic_seg.md b/docs/semantic_segmentation/test_semantic_seg.md index aca212b..c5e8354 100644 --- a/docs/semantic_segmentation/test_semantic_seg.md +++ b/docs/semantic_segmentation/test_semantic_seg.md @@ -64,8 +64,12 @@ drwxr-xr-x 4 root root 45 Jan 9 18:07 t00000020000029e02f61673258829 - 假设启动程序为 **/usr/bin/start.sh** ``` +# --ipc host 表示容器共享主机的所有内存 docker run -it --rm --gpus all --ipc host -v $PWD/in:/in -v $PWD/out:/out -v /data:/data demo/semantic_seg:tmi bash +# --shm-size 128g 表示容器最多共享主机128G内存 +# docker run -it --rm --gpus all --shm-size 128g -v $PWD/in:/in -v $PWD/out:/out -v /data:/data demo/semantic_seg:tmi bash + bash /usr/bin/start.sh ``` diff --git a/seg-instance-demo-tmi/Dockerfile b/seg-instance-demo-tmi/Dockerfile new file mode 100644 index 0000000..7481013 --- /dev/null +++ b/seg-instance-demo-tmi/Dockerfile @@ -0,0 +1,42 @@ +# a docker file for an sample training / mining / infer executor + +# FROM ubuntu:20.04 +FROM python:3.8.16 + +ENV LANG=C.UTF-8 + +# Change mirror +RUN sed -i 's#http://archive.ubuntu.com#http://mirrors.ustc.edu.cn#g' /etc/apt/sources.list \ + && sed -i 's#http://security.ubuntu.com#http://mirrors.ustc.edu.cn#g' /etc/apt/sources.list + +# Set timezone +RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ + && echo 'Asia/Shanghai' >/etc/timezone + +# Install linux package +RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ + libgl1-mesa-glx libsm6 libxext6 libxrender-dev \ + build-essential ninja-build \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt ./ +RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple + +WORKDIR /app +# copy user code to WORKDIR +COPY ./app/*.py /app/ + +# copy user config template and manifest.yaml to /img-man +RUN mkdir -p /img-man +COPY img-man/*.yaml /img-man/ + +# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail +ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + +# entry point for your app +# the whole docker image will be started with `nvidia-docker run ` +# and this command will run automatically + +RUN echo "python /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/seg-instance-demo-tmi/README.MD b/seg-instance-demo-tmi/README.MD new file mode 100644 index 0000000..cd30a72 --- /dev/null +++ b/seg-instance-demo-tmi/README.MD @@ -0,0 +1,3 @@ +# ymir 自定义实例分割镜像 + + diff --git a/seg-instance-demo-tmi/app/pycococreatortools.py b/seg-instance-demo-tmi/app/pycococreatortools.py new file mode 100644 index 0000000..edf777b --- /dev/null +++ b/seg-instance-demo-tmi/app/pycococreatortools.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +""" +from https://github.com/waspinator/pycococreator/blob/0.2.1/pycococreatortools/pycococreatortools.py +""" +import datetime +from itertools import groupby + +import numpy as np +from PIL import Image +from pycocotools import mask + + +def resize_binary_mask(array, new_size): + image = Image.fromarray(array.astype(np.uint8) * 255) + image = image.resize(new_size) + return np.asarray(image).astype(np.bool_) + + +def close_contour(contour): + if not np.array_equal(contour[0], contour[-1]): + contour = np.vstack((contour, contour[0])) + return 
contour + + +def binary_mask_to_rle(binary_mask, compress=True): + """ + if compress: + return {'counts': b'', 'size': list(binary_mask.shape)} + else: + return {'counts': [0, 56541, 7, 338, ...], 'size': list(binary_mask.shape)} + """ + if compress: + rle = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8))) + rle['counts'] = rle['counts'].decode('utf-8') + return rle + + rle = {'counts': [], 'size': list(binary_mask.shape)} + counts = rle.get('counts') + for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order='F'))): + if i == 0 and value == 1: + counts.append(0) + counts.append(len(list(elements))) + + return rle + + +def binary_mask_to_polygon(binary_mask, tolerance=0): + """Converts a binary mask to COCO polygon representation + + Args: + binary_mask: a 2D binary numpy array where '1's represent the object + tolerance: Maximum distance from original points of polygon to approximated + polygonal chain. If tolerance is 0, the original coordinate array is returned. + + """ + from skimage import measure + + polygons = [] + # pad mask to close contours of shapes which start and end at an edge + padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0) + contours = measure.find_contours(padded_binary_mask, 0.5) + contours = np.subtract(contours, 1) + for contour in contours: + contour = close_contour(contour) + contour = measure.approximate_polygon(contour, tolerance) + if len(contour) < 3: + continue + contour = np.flip(contour, axis=1) + segmentation = contour.ravel().tolist() + # after padding and subtracting 1 we may get -0.5 points in our segmentation + segmentation = [0 if i < 0 else i for i in segmentation] + polygons.append(segmentation) + + return polygons + + +def create_image_info(image_id, + file_name, + image_size, + date_captured=datetime.datetime.utcnow().isoformat(' '), + license_id=1, + coco_url="", + flickr_url=""): + + image_info = { + "id": image_id, + "file_name": file_name, + "width": image_size[0], + "height": image_size[1], + "date_captured": date_captured, + "license": license_id, + "coco_url": coco_url, + "flickr_url": flickr_url + } + + return image_info + + +def create_annotation_info(annotation_id, + image_id, + category_info, + binary_mask, + image_size=None, + tolerance=2, + bounding_box=None): + + if image_size is not None: + binary_mask = resize_binary_mask(binary_mask, image_size) + + binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8))) + + area = mask.area(binary_mask_encoded) + if area < 1: + return None + + if bounding_box is None: + bounding_box = mask.toBbox(binary_mask_encoded) + + if category_info["is_crowd"]: + is_crowd = 1 + # segmentation = binary_mask_to_rle(binary_mask) + segmentation = binary_mask_encoded + # avoid TypeError: Object of type bytes is not JSON serializable + segmentation['counts'] = segmentation['counts'].decode('utf-8') + else: + is_crowd = 0 + segmentation = binary_mask_to_polygon(binary_mask, tolerance) + if not segmentation: + return None + + annotation_info = { + "id": annotation_id, + "image_id": image_id, + "category_id": category_info["id"], + "iscrowd": is_crowd, + "area": area.tolist(), + "bbox": bounding_box.tolist(), + "segmentation": segmentation, + "width": binary_mask.shape[1], + "height": binary_mask.shape[0], + } + + return annotation_info diff --git a/seg-instance-demo-tmi/app/result_to_coco.py b/seg-instance-demo-tmi/app/result_to_coco.py new file mode 100644 index 0000000..5346320 --- /dev/null +++ 
b/seg-instance-demo-tmi/app/result_to_coco.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 + +import datetime +import os.path as osp +from typing import Dict, List + +import imagesize +import numpy as np +from easydict import EasyDict as edict +from tqdm import tqdm + +import pycococreatortools + +INFO = { + "description": "Example Dataset", + "url": "https://github.com/waspinator/pycococreator", + "version": "0.1.0", + "year": 2022, + "contributor": "ymir", + "date_created": datetime.datetime.utcnow().isoformat(' ') +} + +LICENSES = [{ + "id": 1, + "name": "Attribution-NonCommercial-ShareAlike License", + "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/" +}] + +CATEGORIES = [ + { + 'id': 1, + 'name': 'square', + 'supercategory': 'shape', + }, + { + 'id': 2, + 'name': 'circle', + 'supercategory': 'shape', + }, + { + 'id': 3, + 'name': 'triangle', + 'supercategory': 'shape', + }, +] + + +def convert(ymir_cfg: edict, results: List[Dict], with_blank_area: bool): + """ + convert ymir infer result to coco instance segmentation format + the mask is encode in compressed rle + the is_crowd is True + """ + class_names = ymir_cfg.param.class_names + + categories = [] + # categories should start from 0 + for idx, name in enumerate(class_names): + categories.append(dict(id=idx, name=name, supercategory='none')) + + coco_output = {"info": INFO, "licenses": LICENSES, "categories": categories, "images": [], "annotations": []} + + image_id = 1 + annotation_id = 1 + + for idx, d in enumerate(tqdm(results, desc='convert result to coco')): + image_f = d['image'] + result = d['result'] + + width, height = imagesize.get(image_f) + + image_info = pycococreatortools.create_image_info(image_id=image_id, + file_name=osp.basename(image_f), + image_size=(width, height)) + + coco_output["images"].append(image_info) # type: ignore + + # category_id === class_id start from 0 + unique_ids = np.unique(result) + for np_class_id in unique_ids: + if with_blank_area: + class_id = int(np_class_id) - 1 + else: + class_id = int(np_class_id) + + # remove background class in infer-result + if with_blank_area and class_id < 0: + continue + + assert class_id < len(class_names), f'class_id {class_id} must < class_num {len(class_names)}' + category_info = {'id': class_id, 'is_crowd': True} + binary_mask = result == np_class_id + annotation_info = pycococreatortools.create_annotation_info(annotation_id, + image_id, + category_info, + binary_mask, + tolerance=2) + + if annotation_info is not None: + coco_output["annotations"].append(annotation_info) # type: ignore + annotation_id = annotation_id + 1 + + image_id += 1 + + return coco_output diff --git a/seg-instance-demo-tmi/app/start.py b/seg-instance-demo-tmi/app/start.py new file mode 100644 index 0000000..e528ff4 --- /dev/null +++ b/seg-instance-demo-tmi/app/start.py @@ -0,0 +1,239 @@ +import logging +import os +import random +import sys +import time +from typing import List + +import cv2 +import numpy as np +from easydict import EasyDict as edict +from tensorboardX import SummaryWriter +from ymir_exc import monitor +from ymir_exc import result_writer as rw +from ymir_exc.util import get_merged_config + +from result_to_coco import convert + + +def start() -> int: + cfg = get_merged_config() + + if cfg.ymir.run_training: + _run_training(cfg) + if cfg.ymir.run_mining: + _run_mining(cfg) + if cfg.ymir.run_infer: + _run_infer(cfg) + + return 0 + + +def _run_training(cfg: edict) -> None: + """sample function of training + + which shows: + - how to get config file + - how to read 
training and validation datasets + - how to write logs + - how to write training result + """ + # use `env.get_executor_config` to get config file for training + gpu_id: str = cfg.param.get('gpu_id') + class_names: List[str] = cfg.param.get('class_names') + expected_maskap: float = cfg.param.get('expected_maskap', 0.6) + idle_seconds: float = cfg.param.get('idle_seconds', 60) + trigger_crash: bool = cfg.param.get('trigger_crash', False) + # use `logging` or `print` to write log to console + # notice that logging.basicConfig is invoked at executor.env + logging.info(f'gpu device: {gpu_id}') + logging.info(f'dataset class names: {class_names}') + logging.info(f"training config: {cfg.param}") + + # count for image and annotation file + with open(cfg.ymir.input.training_index_file, 'r') as fp: + lines = fp.readlines() + + valid_image_count = 0 + valid_ann_count = 0 + + N = len(lines) + monitor_gap = max(1, N // 100) + for idx, line in enumerate(lines): + asset_path, annotation_path = line.strip().split() + if os.path.isfile(asset_path): + valid_image_count += 1 + + if os.path.isfile(annotation_path): + valid_ann_count += 1 + + # use `monitor.write_monitor_logger` to write write task process percent to monitor.txt + if idx % monitor_gap == 0: + monitor.write_monitor_logger(percent=0.2 * idx / N) + + logging.info(f'total image-ann pair: {N}') + logging.info(f'valid images: {valid_image_count}') + logging.info(f'valid annotations: {valid_ann_count}') + + # use `monitor.write_monitor_logger` to write write task process percent to monitor.txt + monitor.write_monitor_logger(percent=0.2) + + # suppose we have a long time training, and have saved the final model + # model output dir: os.path.join(cfg.ymir.output.models_dir, your_stage_name) + stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch10') + os.makedirs(stage_dir, exist_ok=True) + with open(os.path.join(stage_dir, 'epoch10.pt'), 'w') as f: + f.write('fake model weight') + with open(os.path.join(stage_dir, 'config.py'), 'w') as f: + f.write('fake model config file') + # use `rw.write_model_stage` to save training result + rw.write_model_stage(stage_name='epoch10', + files=['epoch10.pt', 'config.py'], + evaluation_result=dict(maskAP=random.random() / 2)) + + _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + + write_tensorboard_log(cfg.ymir.output.tensorboard_dir) + + stage_dir = os.path.join(cfg.ymir.output.models_dir, 'epoch20') + os.makedirs(stage_dir, exist_ok=True) + with open(os.path.join(stage_dir, 'epoch20.pt'), 'w') as f: + f.write('fake model weight') + with open(os.path.join(stage_dir, 'config.py'), 'w') as f: + f.write('fake model config file') + rw.write_model_stage(stage_name='epoch20', + files=['epoch20.pt', 'config.py'], + evaluation_result=dict(maskAP=expected_maskap)) + + # if task done, write 100% percent log + logging.info('training done') + monitor.write_monitor_logger(percent=1.0) + + +def _run_mining(cfg: edict) -> None: + # use `cfg.param` to get config file for training + # pretrained models in `cfg.ymir.input.models_dir` + gpu_id: str = cfg.param.get('gpu_id') + class_names: List[str] = cfg.param.get('class_names') + idle_seconds: float = cfg.param.get('idle_seconds', 60) + trigger_crash: bool = cfg.param.get('trigger_crash', False) + # use `logging` or `print` to write log to console + logging.info(f"mining config: {cfg.param}") + logging.info(f'gpu device: {gpu_id}') + logging.info(f'dataset class names: {class_names}') + + # use `cfg.input.candidate_index_file` to read candidate dataset 
items + # note that annotations path will be empty str if there's no annotations in that dataset + # count for image files + with open(cfg.ymir.input.candidate_index_file, 'r') as fp: + lines = fp.readlines() + + valid_images = [] + valid_image_count = 0 + for line in lines: + if os.path.isfile(line.strip()): + valid_image_count += 1 + valid_images.append(line.strip()) + + # use `monitor.write_monitor_logger` to write task process to monitor.txt + logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") + monitor.write_monitor_logger(percent=0.2) + + _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + + # write mining result + # here we give a fake score to each assets + total_length = len(valid_images) + mining_result = [] + for index, asset_path in enumerate(valid_images): + mining_result.append((asset_path, index / total_length)) + time.sleep(0.1) + monitor.write_monitor_logger(percent=0.2 + 0.8 * index / valid_image_count) + + rw.write_mining_result(mining_result=mining_result) + + # if task done, write 100% percent log + logging.info('mining done') + monitor.write_monitor_logger(percent=1.0) + + +def _run_infer(cfg: edict) -> None: + # use `cfg.param` to get config file for training + # models are transfered in `cfg.ymir.input.models_dir` model_params_path + class_names = cfg.param.get('class_names') + idle_seconds: float = cfg.param.get('idle_seconds', 60) + trigger_crash: bool = cfg.param.get('trigger_crash', False) + seed: int = cfg.param.get('seed', 15) + # use `logging` or `print` to write log to console + logging.info(f"infer config: {cfg.param}") + + # use `cfg.ymir.input.candidate_index_file` to read candidate dataset items + # note that annotations path will be empty str if there's no annotations in that dataset + with open(cfg.ymir.input.candidate_index_file, 'r') as fp: + lines = fp.readlines() + + valid_images = [] + invalid_images = [] + valid_image_count = 0 + for line in lines: + if os.path.isfile(line.strip()): + valid_image_count += 1 + valid_images.append(line.strip()) + else: + invalid_images.append(line.strip()) + + # use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt + logging.info(f"assets count: {len(lines)}, valid: {valid_image_count}") + monitor.write_monitor_logger(percent=0.2) + + _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + + # write infer result + random.seed(seed) + results = [] + + fake_mask_num = min(len(class_names), 10) + for iter, img_file in enumerate(valid_images): + img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE) + mask = np.zeros(shape=img.shape[0:2], dtype=np.uint8) + for idx in range(fake_mask_num): + percent = 100 * idx / fake_mask_num + value = np.percentile(img, percent) + mask[img > value] = idx + 1 + + results.append(dict(image=img_file, result=mask)) + + # real-time monitor + monitor.write_monitor_logger(percent=0.2 + 0.8 * iter / valid_image_count) + + coco_results = convert(cfg, results, True) + rw.write_infer_result(infer_result=coco_results, algorithm='segmentation') + + # if task done, write 100% percent log + logging.info('infer done') + monitor.write_monitor_logger(percent=1.0) + + +def _dummy_work(idle_seconds: float, trigger_crash: bool = False) -> None: + if idle_seconds > 0: + time.sleep(idle_seconds) + if trigger_crash: + raise RuntimeError('app crashed') + + +def write_tensorboard_log(tensorboard_dir: str) -> None: + tb_log = SummaryWriter(tensorboard_dir) + + total_epoch = 30 + for e in range(total_epoch): + 
tb_log.add_scalar("fake_loss", 10 / (1 + e), e) + time.sleep(1) + monitor.write_monitor_logger(percent=e / total_epoch) + + +if __name__ == '__main__': + logging.basicConfig(stream=sys.stdout, + format='%(levelname)-8s: [%(asctime)s] %(message)s', + datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) + sys.exit(start()) diff --git a/seg-instance-demo-tmi/fast.Dockerfile b/seg-instance-demo-tmi/fast.Dockerfile new file mode 100644 index 0000000..8d03b6f --- /dev/null +++ b/seg-instance-demo-tmi/fast.Dockerfile @@ -0,0 +1,20 @@ +FROM youdaoyzbx/ymir-executor:ymir2.0.2-seg-semantic-demo-base + +WORKDIR /app +# copy user code to WORKDIR +COPY . /app + +# copy user config template and manifest.yaml to /img-man +RUN mkdir -p /img-man +COPY img-man/*.yaml /img-man/ + +RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple +# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail +ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + +# entry point for your app +# the whole docker image will be started with `nvidia-docker run ` +# and this command will run automatically + +RUN echo "python /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/seg-instance-demo-tmi/img-man/infer-template.yaml b/seg-instance-demo-tmi/img-man/infer-template.yaml new file mode 100644 index 0000000..67295db --- /dev/null +++ b/seg-instance-demo-tmi/img-man/infer-template.yaml @@ -0,0 +1,12 @@ +# infer template for your executor app +# after build image, it should at /img-man/infer-template.yaml +# key: gpu_id, task_id, model_params_path, class_names, gpu_count should be preserved + +# gpu_id: '0' +# gpu_count: 1 +# task_id: 'default-infer-task' +# model_params_path: [] +# class_names: [] + +# just for test, remove this key in your own docker image +idle_seconds: 3 # idle seconds for each task diff --git a/seg-instance-demo-tmi/img-man/manifest.yaml b/seg-instance-demo-tmi/img-man/manifest.yaml new file mode 100644 index 0000000..38c8521 --- /dev/null +++ b/seg-instance-demo-tmi/img-man/manifest.yaml @@ -0,0 +1,3 @@ +# object_type: 2 for object detection, 3 for semantic segmentation +# 4 for instance segmentation +"object_type": 4 diff --git a/seg-instance-demo-tmi/img-man/mining-template.yaml b/seg-instance-demo-tmi/img-man/mining-template.yaml new file mode 100644 index 0000000..3eae941 --- /dev/null +++ b/seg-instance-demo-tmi/img-man/mining-template.yaml @@ -0,0 +1,12 @@ +# mining template for your executor app +# after build image, it should at /img-man/mining-template.yaml +# key: gpu_id, task_id, model_params_path, class_names, gpu_count should be preserved + +# gpu_id: '0' +# gpu_count: 1 +# task_id: 'default-mining-task' +# model_params_path: [] +# class_names: [] + +# just for test, remove this key in your own docker image +idle_seconds: 3 # idle seconds for each task diff --git a/seg-instance-demo-tmi/img-man/training-template.yaml b/seg-instance-demo-tmi/img-man/training-template.yaml new file mode 100644 index 0000000..c6d423a --- /dev/null +++ b/seg-instance-demo-tmi/img-man/training-template.yaml @@ -0,0 +1,18 @@ +# training template for your executor app +# after build image, it should at /img-man/training-template.yaml +# key: gpu_id, task_id, pretrained_model_paths, class_names, gpu_count should be preserved + +# gpu_id: '0' +# gpu_count: 1 +# task_id: 'default-training-task' +# pretrained_model_params: [] +# class_names: [] + +# format of annotations and images that ymir should provide to this docker container +# annotation format: must be 
seg-coco +# image format: must be raw +export_format: 'seg-coco:raw' + +# just for test, remove this key in your own docker image +expected_maskap: 0.983 # expected mIoU for training task +idle_seconds: 3 # idle seconds for each task diff --git a/seg-instance-demo-tmi/requirements.txt b/seg-instance-demo-tmi/requirements.txt new file mode 100644 index 0000000..708647b --- /dev/null +++ b/seg-instance-demo-tmi/requirements.txt @@ -0,0 +1,11 @@ +pycocotools +pydantic>=1.8.2 +pyyaml>=5.4.1 +tensorboardX>=2.4 +numpy +opencv-python>=4.0 +pillow +imagesize +tqdm +easydict +ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir2.1.0 diff --git a/seg-semantic-demo-tmi/app/start.py b/seg-semantic-demo-tmi/app/start.py index a3c2b78..040817b 100644 --- a/seg-semantic-demo-tmi/app/start.py +++ b/seg-semantic-demo-tmi/app/start.py @@ -214,7 +214,7 @@ def _run_infer(cfg: edict) -> None: monitor.write_monitor_logger(percent=1.0) -def _dummy_work(idle_seconds: float, trigger_crash: bool = False, gpu_memory_size: int = 0) -> None: +def _dummy_work(idle_seconds: float, trigger_crash: bool = False) -> None: if idle_seconds > 0: time.sleep(idle_seconds) if trigger_crash: From b413d1a6f0abc703801e8f4fc1467db025fbfd8d Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 3 Feb 2023 13:49:45 +0800 Subject: [PATCH 192/204] zzz --- seg-instance-demo-tmi/fast.Dockerfile | 3 ++- seg-semantic-demo-tmi/Dockerfile | 2 +- seg-semantic-demo-tmi/fast.Dockerfile | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/seg-instance-demo-tmi/fast.Dockerfile b/seg-instance-demo-tmi/fast.Dockerfile index 8d03b6f..6033dda 100644 --- a/seg-instance-demo-tmi/fast.Dockerfile +++ b/seg-instance-demo-tmi/fast.Dockerfile @@ -2,12 +2,13 @@ FROM youdaoyzbx/ymir-executor:ymir2.0.2-seg-semantic-demo-base WORKDIR /app # copy user code to WORKDIR -COPY . 
/app +COPY ./app/*.py /app/ # copy user config template and manifest.yaml to /img-man RUN mkdir -p /img-man COPY img-man/*.yaml /img-man/ +COPY ./requirements.txt /app/ RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple # view https://github.com/protocolbuffers/protobuf/issues/10051 for detail ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python diff --git a/seg-semantic-demo-tmi/Dockerfile b/seg-semantic-demo-tmi/Dockerfile index 7481013..b1ee2f0 100644 --- a/seg-semantic-demo-tmi/Dockerfile +++ b/seg-semantic-demo-tmi/Dockerfile @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y gnupg2 git libglib2.0-0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY requirements.txt ./ +COPY requirements.txt /app/ RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple WORKDIR /app diff --git a/seg-semantic-demo-tmi/fast.Dockerfile b/seg-semantic-demo-tmi/fast.Dockerfile index 4f0ab11..30ec6b8 100644 --- a/seg-semantic-demo-tmi/fast.Dockerfile +++ b/seg-semantic-demo-tmi/fast.Dockerfile @@ -4,6 +4,9 @@ WORKDIR /app # copy user code to WORKDIR COPY ./app/*.py /app/ +COPY ./requirements.txt /app/ +RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple + # copy user config template and manifest.yaml to /img-man RUN mkdir -p /img-man COPY img-man/*.yaml /img-man/ From f6cd017795254ec98522ad304f1a27b7bccda8c8 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 3 Feb 2023 13:59:47 +0800 Subject: [PATCH 193/204] add confidence for instance segmentation --- seg-instance-demo-tmi/app/result_to_coco.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/seg-instance-demo-tmi/app/result_to_coco.py b/seg-instance-demo-tmi/app/result_to_coco.py index 5346320..8bd1947 100644 --- a/seg-instance-demo-tmi/app/result_to_coco.py +++ b/seg-instance-demo-tmi/app/result_to_coco.py @@ -2,6 +2,7 @@ import datetime import os.path as osp +import random from typing import Dict, List import imagesize @@ -96,6 +97,8 @@ def convert(ymir_cfg: edict, results: List[Dict], with_blank_area: bool): binary_mask, tolerance=2) + # for instance segmentation + annotation_info['confidence'] = max(1.0, 0.1 + random.random()) if annotation_info is not None: coco_output["annotations"].append(annotation_info) # type: ignore annotation_id = annotation_id + 1 From 41355235288b2c9f9aa8647d4851b4c80edb0ea9 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 6 Feb 2023 10:12:04 +0800 Subject: [PATCH 194/204] update instance seg-demo-tmi --- docs/algorithms/mmdet.md | 23 +++++++++++++++++++++ docs/overview/ymir-executor.md | 11 ++++++++-- mkdocs.yml | 7 ++++++- seg-instance-demo-tmi/app/result_to_coco.py | 2 +- 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/docs/algorithms/mmdet.md b/docs/algorithms/mmdet.md index 64ee989..5bca751 100644 --- a/docs/algorithms/mmdet.md +++ b/docs/algorithms/mmdet.md @@ -72,3 +72,26 @@ CMD bash /usr/bin/start.sh # 将镜像的默认启动脚本设置为 /usr/bin/s ``` docker build -t det/mmdet:tmi -f ymir/Dockerfile . ``` + +## 💫复杂用法 + +!!! 
注意 + 这部分内容初学者可以跳过 + +### cfg_options + +当用户使用脚本 “tools/train.py” 或 “tools/test.py” 提交任务,或者其他工具时,可以通过指定 --cfg-options 参数来直接修改配置文件中内容。 + +- 更新字典链中的配置的键 + + 配置项可以通过遵循原始配置中键的层次顺序指定。例如,--cfg-options model.backbone.norm_eval=False 改变模型 backbones 中的所有 BN 模块为 train 模式。 + +- 更新列表中配置的键 + + 你的配置中的一些配置字典是由列表组成。例如,训练 pipeline data.train.pipeline 通常是一个列表。 例如 [dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]。 如果你想要在 pipeline 中将 'flip_prob=0.5' 修改为 'flip_prob=0.0' , 您可以指定 --cfg-options data.train.pipeline.1.flip_prob=0.0. + +- 更新 list/tuples 中的值 + + 如果想要更新的值是一个列表或者元组。 例如, 一些配置文件中包含 param_scheduler = "[dict(type='CosineAnnealingLR',T_max=200,by_epoch=True,begin=0,end=200)]"。 如果你想要改变这个键,你可以指定 --cfg-options param_scheduler = "[dict(type='LinearLR',start_factor=1e-4, by_epoch=True,begin=0,end=40,convert_to_iter_based=True)]"。 注意, ” 是必要的, 并且在指定值的时候,在引号中不能存在空白字符。 + + diff --git a/docs/overview/ymir-executor.md b/docs/overview/ymir-executor.md index d2c9bac..5b654e9 100644 --- a/docs/overview/ymir-executor.md +++ b/docs/overview/ymir-executor.md @@ -44,6 +44,13 @@ docker run hello-world !!! 注意 先按照上述链接中的前提条件安装好 **NVIDIA Driver >=510.47.03 **, 以支持 `cuda11.6+` +!!! gpu驱动与cuda版本 + 引用自openmmlab: + + 对于基于 Ampere 的 NVIDIA GPU,例如 GeForce 30 系列和 NVIDIA A100,CUDA 版本需要 >= 11。 + 对于较旧的 NVIDIA GPU,CUDA 11 向后兼容,但 CUDA 10.2 提供更好的兼容性并且更轻量级。 + 请确保 GPU 驱动程序满足最低版本要求。有关详细信息,请参阅[此表](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) + ``` # 添加软件源 distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ @@ -110,11 +117,11 @@ hello ymir executor ### 基础镜像 -需要选择一个合适的基础镜像,上面的例子中我们采用ubuntu18.04作用基础镜像构建新镜像,基于实践,我们推荐制作ymir镜像的基础镜像包含以下配置: +需要选择一个合适的基础镜像来避免从0开始制作ymir镜像,上面的例子中我们采用ubuntu18.04作用基础镜像构建新镜像,基于实践,我们推荐制作ymir镜像的基础镜像包含以下配置: - python 版本 >= 3.8 -- 支持的cuda版本 >= 11.2 +- ymir镜像的cuda版本<=主机支持的cuda版本 - 推荐基于[nvidia/cuda](https://hub.docker.com/r/nvidia/cuda/tags) 与 [pytorch/pytorch](https://hub.docker.com/r/pytorch/pytorch/tags) 进行ymir镜像制作 diff --git a/mkdocs.yml b/mkdocs.yml index ee69e70..72defb1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -22,7 +22,12 @@ markdown_extensions: # - sane_lists nav: - Home: index.md - - 基本概念: overview + - 基本概念: + - overview/framework.md + - overview/dataset-format.md + - overview/hyper-parameter.md + - overview/ymir-executor.md - 目标检测: object_detection - 语义分割: semantic_segmentation - Ymir镜像说明文档: ymir_executor + - 算法仓库介绍: algorithms diff --git a/seg-instance-demo-tmi/app/result_to_coco.py b/seg-instance-demo-tmi/app/result_to_coco.py index 8bd1947..69a48e9 100644 --- a/seg-instance-demo-tmi/app/result_to_coco.py +++ b/seg-instance-demo-tmi/app/result_to_coco.py @@ -98,7 +98,7 @@ def convert(ymir_cfg: edict, results: List[Dict], with_blank_area: bool): tolerance=2) # for instance segmentation - annotation_info['confidence'] = max(1.0, 0.1 + random.random()) + annotation_info['confidence'] = min(1.0, 0.1 + random.random()) if annotation_info is not None: coco_output["annotations"].append(annotation_info) # type: ignore annotation_id = annotation_id + 1 From 85e8978d1daa383bcc510387d358f63443fed7ba Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Mon, 6 Feb 2023 12:11:07 +0800 Subject: [PATCH 195/204] update doc for instance segmentation and image community --- docs/README.MD | 28 --------------- .../det-detectron2-tmi.md | 2 +- .../det-mmdet-tmi.md | 2 +- .../det-nanodet-tmi.md | 2 +- .../det-vidt-tmi.md | 2 +- .../det-yolov4-tmi.md | 2 +- .../det-yolov5-automl-tmi.md | 2 +- .../det-yolov5-tmi.md | 2 +- 
.../det-yolov7-tmi.md | 2 +- docs/image_community/image_community.md | 33 +++++++++++++++++ .../simple_instance_seg_tmi.md | 35 +++++++++++++++++++ .../simple_semantic_seg_infer.md | 1 + .../simple_semantic_seg_mining.md | 0 .../simple_semantic_seg_training.md | 0 .../test_semantic_seg.md | 0 mkdocs.yml | 20 +++++++++-- 16 files changed, 94 insertions(+), 39 deletions(-) rename docs/{ymir_executor => image_community}/det-detectron2-tmi.md (99%) rename docs/{ymir_executor => image_community}/det-mmdet-tmi.md (99%) rename docs/{ymir_executor => image_community}/det-nanodet-tmi.md (99%) rename docs/{ymir_executor => image_community}/det-vidt-tmi.md (99%) rename docs/{ymir_executor => image_community}/det-yolov4-tmi.md (99%) rename docs/{ymir_executor => image_community}/det-yolov5-automl-tmi.md (97%) rename docs/{ymir_executor => image_community}/det-yolov5-tmi.md (99%) rename docs/{ymir_executor => image_community}/det-yolov7-tmi.md (99%) create mode 100644 docs/image_community/image_community.md create mode 100644 docs/image_segmentation/simple_instance_seg_tmi.md rename docs/{semantic_segmentation => image_segmentation}/simple_semantic_seg_infer.md (98%) rename docs/{semantic_segmentation => image_segmentation}/simple_semantic_seg_mining.md (100%) rename docs/{semantic_segmentation => image_segmentation}/simple_semantic_seg_training.md (100%) rename docs/{semantic_segmentation => image_segmentation}/test_semantic_seg.md (100%) diff --git a/docs/README.MD b/docs/README.MD index f75a7be..a99f718 100644 --- a/docs/README.MD +++ b/docs/README.MD @@ -28,34 +28,6 @@ - [Ymir镜像制作简介](./overview/ymir-executor.md) -## 目标检测 - -- [制作一个简单的检测训练镜像](./object_detection/simple_det_training.md) - -- [制作一个简单的检测推理镜像](./object_detection/simple_det_infer.md) - -- [制作一个简单的检测挖掘镜像](./object_detection/simple_det_mining.md) - -- [测试Ymir目标检测镜像](./object_detection/test_det.md) - -## 语义分割 - -- [制作一个简单的语义分割训练镜像](./semantic_segmentation/simple_semantic_seg_training.md) - -- [制作一个简单的语义分割推理镜像](./semantic_segmentation/simple_semantic_seg_infer.md) - -- [制作一个简单的语义分割挖掘镜像](./semantic_segmentation/simple_semantic_seg_mining.md) - -- [测试Ymir语义分割镜像](./semantic_segmentation/test_semantic_seg.md) - -## 实例分割 - -- [制作一个简单的实例分割训练镜像]() - -- [制作一个简单的实例分割推理镜像]() - -- [制作一个简单的实例分割挖掘镜像]() - ## 基于已有镜像进行定制 - [增/删/改: 默认超参数](./hyper-parameter.md) diff --git a/docs/ymir_executor/det-detectron2-tmi.md b/docs/image_community/det-detectron2-tmi.md similarity index 99% rename from docs/ymir_executor/det-detectron2-tmi.md rename to docs/image_community/det-detectron2-tmi.md index 24386ed..e4d185d 100644 --- a/docs/ymir_executor/det-detectron2-tmi.md +++ b/docs/image_community/det-detectron2-tmi.md @@ -1,4 +1,4 @@ -# detectron2 镜像说明文档 +# ymir-detectron2 镜像说明文档 ## 代码仓库 diff --git a/docs/ymir_executor/det-mmdet-tmi.md b/docs/image_community/det-mmdet-tmi.md similarity index 99% rename from docs/ymir_executor/det-mmdet-tmi.md rename to docs/image_community/det-mmdet-tmi.md index 0f3fc35..d6a6ca9 100644 --- a/docs/ymir_executor/det-mmdet-tmi.md +++ b/docs/image_community/det-mmdet-tmi.md @@ -1,4 +1,4 @@ -# mmdetection 镜像说明文档 +# ymir-mmdetection 镜像说明文档 ## 仓库地址 diff --git a/docs/ymir_executor/det-nanodet-tmi.md b/docs/image_community/det-nanodet-tmi.md similarity index 99% rename from docs/ymir_executor/det-nanodet-tmi.md rename to docs/image_community/det-nanodet-tmi.md index 8d4ce81..2b68296 100644 --- a/docs/ymir_executor/det-nanodet-tmi.md +++ b/docs/image_community/det-nanodet-tmi.md @@ -1,4 +1,4 @@ -# nanodet 镜像说明文档 +# ymir-nanodet 镜像说明文档 > Super fast 
and high accuracy lightweight anchor-free object detection model. Real-time on mobile devices.
 
diff --git a/docs/ymir_executor/det-vidt-tmi.md b/docs/image_community/det-vidt-tmi.md
similarity index 99%
rename from docs/ymir_executor/det-vidt-tmi.md
rename to docs/image_community/det-vidt-tmi.md
index 9b47750..8b8f09b 100644
--- a/docs/ymir_executor/det-vidt-tmi.md
+++ b/docs/image_community/det-vidt-tmi.md
@@ -1,4 +1,4 @@
-# vidt 镜像说明文档
+# ymir-vidt 镜像说明文档
 
 ICLR 2022的 transformer 架构检测器
 
diff --git a/docs/ymir_executor/det-yolov4-tmi.md b/docs/image_community/det-yolov4-tmi.md
similarity index 99%
rename from docs/ymir_executor/det-yolov4-tmi.md
rename to docs/image_community/det-yolov4-tmi.md
index 6cc88d7..f07698f 100644
--- a/docs/ymir_executor/det-yolov4-tmi.md
+++ b/docs/image_community/det-yolov4-tmi.md
@@ -1,4 +1,4 @@
-# yolov4 镜像说明文档
+# ymir-yolov4 镜像说明文档
 
 ## 仓库地址
 
diff --git a/docs/ymir_executor/det-yolov5-automl-tmi.md b/docs/image_community/det-yolov5-automl-tmi.md
similarity index 97%
rename from docs/ymir_executor/det-yolov5-automl-tmi.md
rename to docs/image_community/det-yolov5-automl-tmi.md
index 20ac9c4..f07fbb7 100644
--- a/docs/ymir_executor/det-yolov5-automl-tmi.md
+++ b/docs/image_community/det-yolov5-automl-tmi.md
@@ -1,4 +1,4 @@
-# yolov5 automl 镜像说明文档
+# ymir-yolov5 automl 镜像说明文档
 
 ## 仓库地址
 
diff --git a/docs/ymir_executor/det-yolov5-tmi.md b/docs/image_community/det-yolov5-tmi.md
similarity index 99%
rename from docs/ymir_executor/det-yolov5-tmi.md
rename to docs/image_community/det-yolov5-tmi.md
index 19a4e27..f4f3475 100644
--- a/docs/ymir_executor/det-yolov5-tmi.md
+++ b/docs/image_community/det-yolov5-tmi.md
@@ -1,4 +1,4 @@
-# yolov5 镜像说明文档
+# ymir-yolov5 镜像说明文档
 
 ## 仓库地址
 
diff --git a/docs/ymir_executor/det-yolov7-tmi.md b/docs/image_community/det-yolov7-tmi.md
similarity index 99%
rename from docs/ymir_executor/det-yolov7-tmi.md
rename to docs/image_community/det-yolov7-tmi.md
index 8a616d3..5ec704b 100644
--- a/docs/ymir_executor/det-yolov7-tmi.md
+++ b/docs/image_community/det-yolov7-tmi.md
@@ -1,4 +1,4 @@
-# yolov7 镜像说明文档
+# ymir-yolov7 镜像说明文档
 
 ## 代码仓库
 
diff --git a/docs/image_community/image_community.md b/docs/image_community/image_community.md
new file mode 100644
index 0000000..9ffe7a9
--- /dev/null
+++ b/docs/image_community/image_community.md
@@ -0,0 +1,33 @@
+# 镜像社区
+
+- [镜像社区](http://pubimg.vesionbook.com:8110/img)的目的是共享用户之间制作的镜像,增加用户的可用镜像。
+
+![](../imgs/ymir_image_community.png)
+
+- 用户通过ymir平台可使用与发布镜像
+
+![](../imgs/ymir_publish_image.png)
+
+### 将镜像上传到docker hub
+可以参考[runoob/docker](https://www.runoob.com/docker/docker-repository.html),其发布流程与`git`类似。
+
+- 在[docker hub](https://hub.docker.com/) 上注册帐号,假设用户名 `<username> = youdaoyzbx`
+
+- 将本地镜像 `xxx/xxx:xxx` 添加别名,改为 `<username>/xxx:xxx` 的格式
+```
+docker pull ubuntu:18.04
+docker tag ubuntu:18.04 youdaoyzbx/ubuntu:18.04
+```
+- login 到docker hub并上传
+```
+docker login
+docker push youdaoyzbx/ubuntu:18.04
+```
+
+### 在ymir平台进行发布
+
+- 镜像地址:填写 `<username>/xxx:xxx`, 需要上传到docker hub
+
+- 填写其它信息与[参数说明](./det-yolov5-tmi.md)
+
+- 点击确定并等待Ymir团队审核
diff --git a/docs/image_segmentation/simple_instance_seg_tmi.md b/docs/image_segmentation/simple_instance_seg_tmi.md
new file mode 100644
index 0000000..837276e
--- /dev/null
+++ b/docs/image_segmentation/simple_instance_seg_tmi.md
@@ -0,0 +1,35 @@
+# 制作简单的实例分割镜像
+
+参考语义分割镜像的制作:
+- [语义分割-训练](./simple_semantic_seg_training.md)
+- [语义分割-推理](./simple_semantic_seg_infer.md)
+- [语义分割-挖掘](./simple_semantic_seg_mining.md)
+
+## 镜像说明文件
+
+**object_type** 为 4 表示镜像支持实例分割
+
+- 
[img-man/manifest.yaml](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/seg-instance-demo-tmi/img-man/manifest.yaml) +``` +# 4 for instance segmentation +"object_type": 4 +``` + +## 训练结果返回 + +``` +rw.write_model_stage(stage_name='epoch20', + files=['epoch20.pt', 'config.py'], + evaluation_result=dict(maskAP=expected_maskap)) +``` + +## 推理结果返回 + +采用coco数据集格式,相比语义分割,实例分割的annotation中需要增加 `bbox` 的置信度。 +``` +# for instance segmentation +annotation_info['confidence'] = min(1.0, 0.1 + random.random()) + +coco_results = convert(cfg, results, True) +rw.write_infer_result(infer_result=coco_results, algorithm='segmentation') +``` diff --git a/docs/semantic_segmentation/simple_semantic_seg_infer.md b/docs/image_segmentation/simple_semantic_seg_infer.md similarity index 98% rename from docs/semantic_segmentation/simple_semantic_seg_infer.md rename to docs/image_segmentation/simple_semantic_seg_infer.md index e5c7c63..42a9352 100644 --- a/docs/semantic_segmentation/simple_semantic_seg_infer.md +++ b/docs/image_segmentation/simple_semantic_seg_infer.md @@ -74,6 +74,7 @@ monitor.write_monitor_logger(percent=1.0) ## 写结果文件 ``` +coco_results = convert(cfg, results, True) rw.write_infer_result(infer_result=coco_results, algorithm='segmentation') ``` diff --git a/docs/semantic_segmentation/simple_semantic_seg_mining.md b/docs/image_segmentation/simple_semantic_seg_mining.md similarity index 100% rename from docs/semantic_segmentation/simple_semantic_seg_mining.md rename to docs/image_segmentation/simple_semantic_seg_mining.md diff --git a/docs/semantic_segmentation/simple_semantic_seg_training.md b/docs/image_segmentation/simple_semantic_seg_training.md similarity index 100% rename from docs/semantic_segmentation/simple_semantic_seg_training.md rename to docs/image_segmentation/simple_semantic_seg_training.md diff --git a/docs/semantic_segmentation/test_semantic_seg.md b/docs/image_segmentation/test_semantic_seg.md similarity index 100% rename from docs/semantic_segmentation/test_semantic_seg.md rename to docs/image_segmentation/test_semantic_seg.md diff --git a/mkdocs.yml b/mkdocs.yml index 72defb1..0f8cbac 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -28,6 +28,20 @@ nav: - overview/hyper-parameter.md - overview/ymir-executor.md - 目标检测: object_detection - - 语义分割: semantic_segmentation - - Ymir镜像说明文档: ymir_executor - - 算法仓库介绍: algorithms + - 图像分割: + - image_segmentation/simple_semantic_seg_training.md + - image_segmentation/simple_semantic_seg_infer.md + - image_segmentation/simple_semantic_seg_mining.md + - image_segmentation/test_semantic_seg.md + - image_segmentation/simple_instance_seg_tmi.md + - 镜像社区: + - image_community/image_community.md + - image_community/det-yolov5-tmi.md + - image_community/det-mmdet-tmi.md + - image_community/det-nanodet-tmi.md + - image_community/det-detectron2-tmi.md + - image_community/det-yolov7-tmi.md + - image_community/det-vidt-tmi.md + - image_community/det-yolov5-automl-tmi.md + - image_community/det-yolov4-tmi.md + - 算法仓库: algorithms From e10c8189f92335e1b57bb8502d5862e1b8c9de49 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Wed, 8 Feb 2023 10:52:55 +0800 Subject: [PATCH 196/204] udpate doc --- docs/algorithms/mmdet.md | 19 +++++++++++++++++++ .../simple_instance_seg_tmi.md | 3 +++ 2 files changed, 22 insertions(+) diff --git a/docs/algorithms/mmdet.md b/docs/algorithms/mmdet.md index 5bca751..1135545 100644 --- a/docs/algorithms/mmdet.md +++ b/docs/algorithms/mmdet.md @@ -94,4 +94,23 @@ docker build -t det/mmdet:tmi -f ymir/Dockerfile . 
diff --git a/docs/image_segmentation/simple_instance_seg_tmi.md b/docs/image_segmentation/simple_instance_seg_tmi.md
index 837276e..5ebb88b 100644
--- a/docs/image_segmentation/simple_instance_seg_tmi.md
+++ b/docs/image_segmentation/simple_instance_seg_tmi.md
@@ -1,8 +1,11 @@
 # Building a simple instance segmentation image
 
 Refer to how the semantic segmentation images are built:
+
 - [semantic segmentation - training](./simple_semantic_seg_training.md)
+
 - [semantic segmentation - inference](./simple_semantic_seg_infer.md)
+
 - [semantic segmentation - mining](./simple_semantic_seg_mining.md)
 
 ## Image manifest file

From 0c5991964decb63b92925108f9e6b4237765573d Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 10 Feb 2023 10:08:41 +0800
Subject: [PATCH 197/204] add mmseg doc

---
 docs/algorithms/mmseg.md               | 39 +++++++++++
 docs/image_community/det-yolov7-tmi.md |  2 +-
 docs/image_community/seg-mmseg-tmi.md  | 92 ++++++++++++++++++++++++
 3 files changed, 132 insertions(+), 1 deletion(-)
 create mode 100644 docs/image_community/seg-mmseg-tmi.md

diff --git a/docs/algorithms/mmseg.md b/docs/algorithms/mmseg.md
index e69de29..ca0b642 100644
--- a/docs/algorithms/mmseg.md
+++ b/docs/algorithms/mmseg.md
@@ -0,0 +1,39 @@
+# ymir-mmsegmentation
+
+## About mmsegmentation
+
+mmsegmentation is OpenMMLab's open-source semantic segmentation toolbox and contains a large number of algorithms. Read its [official documentation](https://mmsegmentation.readthedocs.io/zh_CN/latest/index.html) for detailed usage; only training and inference are covered here.
+
+### Training
+
+- The single-GPU training command is as follows; the **CONFIG_FILE** can be found under the [configs](https://github.com/open-mmlab/mmsegmentation/tree/master/configs) directory
+
+```
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+
+# train a deeplabv3plus model on the cityscapes dataset
+python tools/train.py configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py
+```
+
+- The multi-GPU training command is as follows
+```
+sh tools/dist_train.sh ${CONFIG_FILE} ${GPUS} [optional arguments]
+
+# train with 4 GPUs
+sh tools/dist_train.sh configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py 4
+```
+
+### Inference
+
+- See [demo/image_demo.py](https://github.com/open-mmlab/mmsegmentation/tree/master/demo/image_demo.py)
+
+- First download the weight file matching the config; the corresponding **CONFIG_FILE** and weight file can be found in [configs/deeplabv3plus/README.md](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3plus/README.md)
+
+```
+wget https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth
+```
+
+- Run inference
+```
+python demo/image_demo.py demo/demo.png configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth
+```
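+
+The same inference can also be done from Python rather than through the demo script. This is a minimal sketch against mmsegmentation 0.x's `mmseg.apis`, reusing the config and checkpoint downloaded above.
+
+```
+from mmseg.apis import inference_segmentor, init_segmentor
+
+config_file = 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py'
+checkpoint_file = 'deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth'
+
+# build the model from the config file and the downloaded checkpoint
+model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
+
+# returns a list with one H x W array of per-pixel class ids per input image
+result = inference_segmentor(model, 'demo/demo.png')
+print(result[0].shape)
+```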
diff --git a/docs/image_community/det-yolov7-tmi.md b/docs/image_community/det-yolov7-tmi.md
index 5ec704b..3acf80d 100644
--- a/docs/image_community/det-yolov7-tmi.md
+++ b/docs/image_community/det-yolov7-tmi.md
@@ -8,7 +8,7 @@
 ## Image address
 
 ```
-youdaoyzbx/ymir-executor:ymir2.0.0-yolov7-cu111-tmi
+youdaoyzbx/ymir-executor:ymir2.1.0-yolov7-cu111-tmi
 ```
 
 ## Performance
diff --git a/docs/image_community/seg-mmseg-tmi.md b/docs/image_community/seg-mmseg-tmi.md
new file mode 100644
index 0000000..aeb523a
--- /dev/null
+++ b/docs/image_community/seg-mmseg-tmi.md
@@ -0,0 +1,92 @@
+# ymir-mmsegmentation image documentation
+
+- Supported task types: training, inference, mining
+
+- Supported algorithms: fastscnn semantic segmentation
+
+- Version information
+
+```
+python: 3.8.8
+pytorch: 1.8.0
+torchvision: 0.9.0
+cuda: 11.1
+cudnn: 8
+mmcv: 1.6.1
+mmsegmentation: 0.27.0+
+```
+
+## Image information
+
+> Reference repository: [open-mmlab/mmsegmentation](https://github.com/open-mmlab/mmsegmentation)
+
+- Code repository: [modelai/ymir-mmsegmentation](https://github.com/modelai/ymir-mmsegmentation)
+
+- Image address
+
+```
+docker pull youdaoyzbx/ymir-executor:ymir2.1.0-mmseg-cu111-tmi
+```
+
+## Performance
+
+> See [fastscnn](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastscnn)
+
+### Cityscapes
+
+| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
+| - | - | - | -: | - | - | -: | - | - | - |
+| FastSCNN | FastSCNN | 512x1024 | 160000 | 3.3 | 56.45 | 70.96 | 72.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853.log.json) |
+
+## Training hyper-parameters
+
+| hyper-parameter | default value | type | note | advice |
+| - | - | - | - | - |
+| export_format | seg-coco:raw | string | handled by the ymir backend; export format of ymir segmentation datasets | do not change |
+| shm_size | 128G | string | handled by the ymir backend; shared memory available to the docker image | suggested size: number of GPUs used by the image * 32G |
+| config_file | configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py | file path | mmlab config file | the fastscnn series is recommended; see [configs](https://github.com/modelai/ymir-mmsegmentation/tree/master/configs) |
+| samples_per_gpu | 2 | integer | number of images processed at a time per GPU | while GPU memory usage stays below 50%, doubling it speeds up training |
+| workers_per_gpu | 2 | integer | number of data-loading processes per GPU | the default is fine; increase it if memory and CPU allow |
+| max_iters | 20000 | integer | number of training iterations over the dataset | the default is usually fine; inspect tensorboard if you suspect it needs changing |
+| interval | 2000 | integer | evaluation period on the validation set | the default is fine |
+| args_options | '' | string | training command-line arguments | see [tools/train.py]() |
+| cfg_options | '' | string | training command-line arguments | see [tools/train.py]() |
+| save_least_file | True | boolean | whether to keep only the best and the latest weight files | set to True |
+| max_keep_ckpts | -1 | integer | when save_least_file is False, the maximum number of weight files to keep | set to k to keep the k best and k latest weight files; -1 keeps all weight files |
+| ignore_black_area | False | boolean | whether to ignore unlabeled regions | keep the default to treat blank regions as background during training |
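+
+As a concrete illustration, these training hyper-parameters reach the container as YAML key/value pairs in ymir's task configuration. The exact file the executor reads them from (commonly `/in/config.yaml`) is an assumption here; the keys are the ones listed in the table above.
+
+```
+# example training hyper-parameters for the ymir-mmsegmentation image
+config_file: configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py
+samples_per_gpu: 4        # doubled from the default; keep GPU memory usage below 50%
+workers_per_gpu: 2
+max_iters: 20000
+interval: 2000
+save_least_file: true
+max_keep_ckpts: -1
+ignore_black_area: false
+```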
+
+## Inference hyper-parameters
+
+| hyper-parameter | default value | type | note | advice |
+| - | - | - | - | - |
+| shm_size | 128G | string | handled by the ymir backend; shared memory available to the docker image | suggested size: number of GPUs used by the image * 32G |
+| samples_per_gpu | 2 | integer | number of images processed at a time per GPU | while GPU memory usage stays below 50%, doubling it speeds up inference |
+| workers_per_gpu | 2 | integer | number of data-loading processes per GPU | the default is fine; increase it if memory and CPU allow |
+
+## Mining hyper-parameters
+
+| hyper-parameter | default value | type | choices | note |
+| - | - | - | - | - |
+| mining_algorithm | RSAL | str | RSAL, RIPU | name of the mining algorithm |
+| superpixel_algorithm | slico | str | slico, slic, mslic, seeds | name of the superpixel algorithm |
+| uncertainty_method | BvSB | str | BvSB | name of the uncertainty measure |
+| shm_size | 128G | str | 128G | shared memory available to the container |
+| max_superpixel_per_image | 1024 | int | 1024, ... | upper bound on the number of superpixels in one image |
+| max_kept_mining_image | 5000 | int | 500, 1000, 2000, 5000, ... | upper bound on the number of mined images |
+| topk_superpixel_score | 3 | int | 3, 5, 10, ... | number of superpixels used in one image |
+| class_balance | True | bool | True, False | whether to balance annotations across classes |
+| fp16 | True | bool | True, False | whether to use fp16 acceleration |
+| samples_per_gpu | 2 | int | 2, 4, ... | batch size per gpu |
+| workers_per_gpu | 2 | int | 2 | num_workers per gpu |
+| ignore_blank_area | False | bool | True, False | whether to ignore unlabeled regions |
+
+## Building the image
+
+- [Building a YMIR semantic segmentation image](https://ymir-executor-fork.readthedocs.io/zh/latest/image_segmentation/simple_semantic_seg_training/)
+
+- [About mmsegmentation](https://ymir-executor-fork.readthedocs.io/zh/latest/algorithms/mmseg/)

From 8bafa996c99f9baacbcb1a447e5e7e96a86c3482 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 10 Feb 2023 10:58:33 +0800
Subject: [PATCH 198/204] update seg-mmseg-tmi.md

---
 mkdocs.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mkdocs.yml b/mkdocs.yml
index 0f8cbac..8f2ea06 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -36,6 +36,7 @@ nav:
   - image_segmentation/simple_instance_seg_tmi.md
   - Image community:
     - image_community/image_community.md
+    - image_community/seg-mmseg-tmi.md
     - image_community/det-yolov5-tmi.md
     - image_community/det-mmdet-tmi.md

From b3638ccd02065bd5a8767e55ff90b44b5183fdd6 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Fri, 10 Feb 2023 14:13:35 +0800
Subject: [PATCH 199/204] add doc

---
 docs/image_community/seg-mmseg-tmi.md | 22 ++++++++++++++++++--
 docs/imgs/ymir_image_community.png    | Bin 0 -> 114521 bytes
 docs/imgs/ymir_publish_image.png      | Bin 0 -> 30183 bytes
 3 files changed, 20 insertions(+), 2 deletions(-)
 create mode 100644 docs/imgs/ymir_image_community.png
 create mode 100644 docs/imgs/ymir_publish_image.png

diff --git a/docs/image_community/seg-mmseg-tmi.md b/docs/image_community/seg-mmseg-tmi.md
index aeb523a..27de31c 100644
--- a/docs/image_community/seg-mmseg-tmi.md
+++ b/docs/image_community/seg-mmseg-tmi.md
@@ -2,7 +2,7 @@
 
 - Supported task types: training, inference, mining
 
-- Supported algorithms: fastscnn semantic segmentation
+- Supported algorithms: deeplabv3plus, fastscnn, hrnet, ocrnet semantic segmentation
 
 - Version information
 
@@ -37,6 +37,24 @@
 | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
 | - | - | - | -: | - | - | -: | - | - | - |
 | FastSCNN | FastSCNN | 512x1024 | 160000 | 3.3 | 56.45 | 70.96 | 72.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py) |
[model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853.log.json) | +| HRNet | HRNetV2p-W18-Small | 512x1024 | 40000 | 1.7 | 23.74 | 73.86 | 75.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216.log.json) | +| HRNet | HRNetV2p-W18 | 512x1024 | 40000 | 2.9 | 12.97 | 77.19 | 78.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216.log.json) | +| HRNet | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 75.31 | 77.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700.log.json) | +| HRNet | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.65 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255.log.json) | +| HRNet | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 76.31 | 78.31 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901.log.json) | +| HRNet | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 78.80 | 80.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822.log.json) | +| DeepLabV3+ | R-50-D8 | 512x1024 | 40000 | 7.5 | 3.94 | 79.61 | 81.01 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610.log.json) | +| DeepLabV3+ | R-50-D8 | 769x769 | 40000 | 8.5 | 1.72 | 78.97 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143.log.json) | +| DeepLabV3+ | R-18-D8 | 512x1024 | 80000 | 2.2 | 14.27 | 76.89 | 78.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes-20201226_080942.log.json) | +| DeepLabV3+ | R-18-D8 | 769x769 | 80000 | 2.5 | 5.74 | 76.26 | 77.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes_20201226_083346-f326e06a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes-20201226_083346.log.json) | +| DeepLabV3+ | R-18b-D8 | 512x1024 | 80000 | 2.1 | 14.95 | 75.87 | 77.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes_20201226_090828-e451abd9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes-20201226_090828.log.json) | +| DeepLabV3+ | R-18b-D8 | 769x769 | 80000 | 2.4 | 5.96 | 76.36 | 78.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes_20201226_151312-2c868aff.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes-20201226_151312.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 
40000 | 3.5 | 10.45 | 74.30 | 75.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304.log.json) |
+| OCRNet | HRNetV2p-W18 | 512x1024 | 40000 | 4.7 | 7.50 | 77.72 | 79.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320.log.json) |
+| OCRNet | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 77.16 | 78.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735.log.json) |
+| OCRNet | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.57 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521.log.json) |
+| OCRNet | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 78.45 | 79.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005.log.json) |
+| OCRNet | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 79.47 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001.log.json) |
 
 ## Training hyper-parameters
@@ -83,7 +101,7 @@ docker pull youdaoyzbx/ymir-executor:ymir2.1.0-mmseg-cu111-tmi
 | fp16 | True | bool | True, False | whether to use fp16 acceleration |
 | samples_per_gpu | 2 | int | 2, 4, ... | batch size per gpu |
 | workers_per_gpu | 2 | int | 2 | num_workers per gpu |
-| ignore_blank_area | False | bool | True, False | whether to ignore unlabeled regions |
+| ripu_region_radius | 1 | int | 1, 2, 3 | parameter specific to the RIPU mining algorithm |
 
 ## Building the image

diff --git a/docs/imgs/ymir_image_community.png b/docs/imgs/ymir_image_community.png
new file mode 100644
index 0000000000000000000000000000000000000000..b65314ff39fbcd3e02a79d4c25b25f3b732ab771
GIT binary patch
literal 114521
(base85-encoded PNG data omitted)
ze?J%oBctK9M%9#~j(ZKcqf`O?P(Dwf@OZRcO@gpweOnq8uu=>digxKDp@q5gv8|;Ou3`YTK*Hmn{S1NxF6Z!4Y9~qosp+o={g+ zkf&1HT|adG-j(Tb>>*h>^L$CcGT>ux-pp;RADXQnpbhP5p%mLmhtKPB%O^Q)^xfrK z8MTPbMuv@>q(uj2kMe@lb3I<#5#JlGea?ew;8C<8ur)>2B~qp zhfay?@NcH}=1$Mg&XnM6THLfWDRJxU+j~NwZ|6edgOq!+)Pwoh$Y~{Zkc(3jvnTA% z>OMKdELLw9b*-SXdT-p`-iVtyF+puR;&O7$`-X8B^95>ga2cvlKnp|3qeOxkOF2j5 zsG$N8E;`L4<|l)UuJ(eSz->C@XylpL(ql=yT+E7&b{hiL1uHnX`FVS5jes(j`f4BB zZ!9oB(@`Q;^B-oPBdLy!RT8^b`Nz+Uq`=Hb{oAC!YcJ05bv{7~l%+m*^;@-@IXKip zAd#c(?4{rK#8*|3Locd1y*w(ch>=lXqhAVAdVGk#IqXhR;<_AaEE?p&On0`iCWXEijp8;;tn3FP1#JJWy7b&M%ox7&DEG#F3b7|dB~Cd2 zK?q~<`K>2J_m1+5G@NmJ&kO~fTY(P>tn&S>$CLF~Q-_v4F1;s$9Cg-lx3x?6hB)Dv zQ;nKIyr8ZLF>jP7p|a)k3MlxEP~IHKJtYhV&Fqn=I|lEz$-EGiqO--^9s zz>hjvT=Pm+jlm;I5ws!Cm)9E`^X@ne4W49t?c%`OkRPYcoQt=tn<2lXXE%mGC`$CX zg3*}KnK<-(U7B=pbaeUjpnTogewJ*a0|8b_W}bt_+`%94J0+SrhwNn*k;5ahB3OBu zc}^ywrakpWJMkua&7KGU?zZpvF|>Rxi*U}zkz+gAF^f4h z?lRgHgx-cCGrG;{jp1bT@bQrd$UDPx4mBen90JIFY^JBXDNb`V4 zWhMMwTM9)R>US-VL%JwF4E%##uyLtD+~#Yt$~R z@T?0sr$-p;UgrF^Vw4d~fL*^CTga|&Vr=k+ep?g=)oYcEuJDnm#KS&YPsddw8b~ zs{RH+-@%c58QemT!lB%9LzWgB=d-tD7NyR%7~C*p1mCGuAa)^(vVJ%BqrDnr=sHVYL=@!tF^EbI3<{4T;%mwO(rs` znN-BFRJGa>m&PJ<2A`j=F=);FhJ}jb6e8r>Js+^@YR8?OYTFr1W3umC0V7Wox<>7# zqjZri0rM^|MY4{I88sH3voAgWRTcKQTm>+K8ZrJjqr_a9J{pGx>o-RAr-k{%1DC8q zF&fi;;HlG3O)V~mu+-O*JEd&s@eyS%xS*FIRt3|h4>IaeFhST8xjnIxr&L_B8Ck#_ zUQSZ@h5^q2OV}GYr0&klZZm;Q5zM3_8)ZJ|vc0APRTMv|SskmK)FV`kiwWs}9C0RP zh+9_V?#I_^u%a3?Ikj8ghWPR;>kf-lh$CKqWYBee-**Oy?xlD{6K@E4S;^Hg0l6g_ zm4`)ew0qyNj;h!6Oxb8g(9l?taAW5xq{^Jz%g!#stV`it=VVCxh@4T(p4ews#_woH z$QIv;<9T<8YGstdDu+ZtAwheHFvJ@&+Fvt7Y4-LtbRBoqE)ax8SX|GhP({j zd@P1Dk{$iqk1F=ne8b`Io4<%dzUW0-Rb< zQkY_z)1J*wQ`Bba59Oc})jLb|aY$(U=G*u~`G!4F=)TT_K%7W-V^rBQk-pGGFIZxd zWNLk6>zvMf-PqXvC;Uf64fU20r5L9Yu7jMUMkYcrF0%3{?4);z%M-IxsRMQ;u@jFB zPRUZYv7&UoTiqFTE-ZB?*=P%M<0hVBQ&{?SGXB{)17yb-MdVu^9Yy z6oplKf}Y9V)8S`Hqc*s#bK%+%c@rs6v@iXR0x<{G4*bB09Xv>m5OcpMIRz0Ek#SP6 zJ?*!YI~@_rk90@ChTUsWMufRB4OClPP@AhryJVPi%{c-(8thfAs2e-0zJ2IeME?fg znHir~%;-5@gPp=2QQGc6gmhmnW$e~uRrhjFPr-`OH}84b?mXgcXIte{zA|BM| zKqXN#abA z%h9P_crBnAR{hddJNF4l6}ohS`8-;`vNd`iwNrXywB*Gx5*4j2SzIESKyAs!Ne#~w z*d1H%AsSnj<09{Wd-8Dp~WCFv0nYJypl4kWGc0D&)<`kvBP3`Vr4EY)i;C&?Jmd6Z z4=>-lHhR)}7PTUx8Cw^U($`b{U$}ZjNB7ep#5OJu1Ow~D{t%O-h6<6z~>(mah^#(=x#qv#Yxrkg+@H+ z*mo!_EtBbzzPp}c>mY)5iu$QHRP1wCy8TB z%h2mE1bfX>>ho_O*0VPt94a6Nrx1VaRM#`0JFLl6N|cU(58UuV&)Ogc+Xn zIB44U&RjP;eCu60PglCwQQB@dO6^H9?4epN*R8h1% zWjBYiwuTuDz^vMdcD_ezRC`JtWT_3*`X1aJ2q8JhSpe^%7iRdqE)`}3tD_&tW1f;#p9t&{!Udm_PhDe zY6pBkN5u9hrj+0@W^N;yi-8G>d_*_VEYUs6EZ@C9bdauWu@S0rGzA0gBcIq3!QJo! 
z{-%)+VQl2-*%)`vDU(K`|v#^bZHzaYzE?IQcJMbPsO}Yy(F+ zj!Jjq&i3U*WF&Pl_~FH<*m?S9>U!vL4mv%|fI7%K5>o4rTi=fEDGW;=MWn!{gqTfi zD;&4>RVPp2Xz1f(GGs7~y#=M&mK!mN$KvN>KtIrJi}JQ(j@^ct*ZR7NcCgD;2&e>Q zjM1(r7t?9xdSi0ONsP9rlTNFf&77xa7lHE*d3I;P~ zL~R>7`ldm*#8OcC5H$s+%|a1v#48+kUgnC+FE??c;Wi?#shftvE_GYHZn6y*wchP; zT>QCvIa0Hy$uD*1kckeseN-C~bM`m<;Q$vsesHJKm()$5 zzDpspm|9cJm%M^G;!#?Jgn~?pUclwKp}XRv{?mJ3OojlFn8N6+?q#bmjkmE6<0EF) zFuSTAL0Y0yEn?pd+ISi`XhdIBYEXwd4@>IY_6+QR9!`*_2MpbRDAajHCblAzW&*Ox z`Il$`{72@t96Mv(1&v~1Y(G>T$?h~BaLv4G`=xQsN*q=?N zY=wp3re@a5MSxSDl8_A9E<fun+Jiq&Y}elC&Nh6a zi9c+>P9xlK7EOi1$^3NM4DX4hk9l}Hv2UzC?2M43STlO=;%0$NBXWyGG~5XoOB7qL z7^?yIlzJb<(M=*{)P?I^ZWBbxEP1U|R z{OP&x0oyUR4aimHCkv`}e~!CU47fN_V3^w{L;w$Wn}w^(sG%Tt+B$ehSbMsUX2qC* z-sx?$QsOD%WA^pZINhhaDtHla;UOXWxCU4QxelPK{W7gG5t9?=3?Gx#8=KqVun_S7 z)g(YUKso<8t2s-r7XGd%9PXGieZ*p=b{-B~bgHXHf*t6=FYQ1Bn@Ow1VG;4AK`FTY z_w0|~j87@6H&N8~eVR0uy-gdHelITYJBgEDfBEYRq6KhLPNSxaz3H6?+eqv!RmPY; z1s`M={#TxxrsPRbZ_WQK^R-XHxM7rXJj+f_&)or)nB4Nue~%%HbG8=@Z2n5E6u%J; zhAo~U!piwG_e*2z^#x<`APCV0pOK-PoK-_w0(=CY)FjKHHPy2dEZU{p3b!7i5- zLJ8m1?$|e)Vp}Krsm$!ucW>)@xRFTZUS6AX=4st<0~W7aE8k^}xOC^ImfWo`aSq^u z(yfE_rjDFd2rxQdD9SwK(WA0F`nkH=8hPll>78yqlsz70luh!UWjCFt$0Ut)9H!l~ zy(*9P1$#J*tL=}pALXtNEpII|Y{1&|b@e8{4`Y>P6AA6aFtklO?rhTY!p1UNLe)K` zWZPUnr-eJfpL4)kk`BvBn4|C%ewZ^Uwb7! z#p-tv5}R&L?_SZJ{fnEMXTMu*_Bss$sezl+XS=S*As{^3gQeBycFuyoo^Wm|`w?t+ zGig6rJ=dtwm%T={OYeDHqx32(Ym)*+zCaJP-Kmv=ugnoLk~ z&bFgJE*u+Tv3XWK*1}n4BH_)$mTA4*S>EHjF04(ca7VrSAN4bD{<&aOYh$NY={)!L z_42(+H)OqkZ@qM)Sq~S>KxQ}o7`5$*k_2*EsI59Wte$MfJgv zS1ZeEdNjHin(<3dar%C_HfL8aZ?3;-+|{4LvjP)LYo-xq4J(^7j0m zP6z}=4l{}*v%86fc*vZ$5AE8cp9VB39`V>s71m7v2+ z^L&1$zAl|Rm=xLS@7JkOX&>CMEcbX^zM$U;y}wqaR+P4hYQHXjU-gg0gi)?esA+Om z$_U4O{#XEr<2={8!a`SUS21c?Lyuh24;PouwvQ&Xy-qKH@z=5#d za}40}g3 zmjI$XYnB0nRyUtw6payiI-{X%*0Un2*P#Y22+yuVr?{-^+b`?u>o{rPMbBGTE4PNQ zkTLacDR>X~potxS>h9Evggik%W@}}Cuw!9O(xHpW@o3=4GUKoK^fP_X;bhE6WqDxg zUgM^1?;GnQbF5Dm;DHHE17|{j^Go%tA+~B%&XJ`%N1i|qwrf`yH|nNzmankYEQTOr zE>-VFh4u)6sp9FrC^vcbLe7%JH%puFqc7XqDu+p*{^RUsVj3UzKq7J=x3~DiDzYI|39FxM&VJF-A zGeSEDPj@KE72|F0Yg8&(M$;!b4Ts8AV5b|}cQqd!T4@{FI-9$>{*ltLXrwI=Thx{E zW2d!xIuKHh^4`^r-fQazSlx(L!G|8mUNVELfGi;Y6Qji8aK%HH=fbzfO#nPehD}{E zo;&XLU|hpCS8CaLkhek98Ts|?O4X>YnKR(36uB)F2`cdzY9Jhkf9cl6(7F`N8Hg5@kplZxr_p5npx4m8($lnw^yIs)eXpHePk}%)f7wg4ysTN-4;)E8cdR z2-3}uiX~24ltH5Xf(Np=qOI1!4|tc*^i=)lED(l@U(Y@;kvwG;DzKWEmH7S8-b*DfZo!*tOT*8*-{yc05AaHb5{u#>089^`eNr4JB0wZ zO*foVHn`CiNBZYEDgFB3H4cmAFD~LV_yuG10&yOH)c4 zdDh=2z*3Dgp>`-c>8;f46uD~}aGB2%1V4I`0sOI#*cbkRSe;WUmI#U6@b$XIVkXB& zDjx$YU|Mgc*CPjS>?&hC{YGjow8CQa>enZvE3v7HUhX0MXCl(Q z%DW|{TjQy)KGiu`x}!{FLtmymrXDo(+4!eH>k?a{=&IeeQ9oMm?S^Ns%Q;~kHh21W z=`CDpSKxx0YfX}N{@8tQF9TmB?rxq!Y1A}T&1T5s{%V!GB@WW|=z?D-!_>s%-nvMT zKyqh=1AqDlc3YUWJ-icj=C!bVPPyGdkzrfY8uvf6=m|GTlE z7Zv*__9Z$6-!V(U^*<~7ct!?9c4cztSb77!zO_l5gNV8KrR2!U>l&37uZ!w@NP->} z-@B1V>~98|fedn>g2UD*4iPHu-^`C{DEq@Bxv-%tO zx=Ou*5WdW@4WoaezB>p`C)K`?xCgX;j-*qHIncRIqq^d43JcP`uCPW6e- zgswMNdunt$0?%kclZACuj99ML;6alcQh!@=I0@rO!Z*3>g3G3dWQhM~Qf6WU8D#f9 z>2p=Bp3%KMzS3FQZvB_SkBflaA<sL>6}8Ix-Ja%EP@AA*AL=X2z$TuN z_0lD0 z+C_q!YHkbEI4ZgadbUpD2f3}ArIjXm!ykKPv5EbCF6)*2@lf|u@z|P~K9#u0X&*0> z5Pj57$~}9GQ`aV2CmxvzC`tM4_sfy|Y|@$}?kD*;gVR|^Y6f7AAG&F9)e8K}W_P8+Z){~Y50I#>VYjtyJjeWq%j z&X!>7K(_;5Uf{rZyDoRxD_)*rOwXoSei|}M55k`&a4?N&Bc6EiS3NKhYju~dq>ire z4?ks`TX`)1(`pTzR91hf6St&e^Z*awFXc%;2i~zw_20T5P?FWLn4V`=l@>N~hwt`c z%@qUQ#MKZdRQxD1d7SWc?|S`zj*w}8*n@VuMy;`zvd?s~a^NVst>ndXbkpT&X+yIp zbanA3yU)J}S3bmqP6`+7f-m@d`ATMS*YTO4;cq(Vl2gf&zxj{9{vUt+KmPiE{Pq9% z>;LiB|KqR!$6x=Czy81Cum7#E9i-s)Mw-eiwSEni_yP8Fq(+YQ9}mCItNhKPvn06_ 
zM%tir`}Ub4q;BPWdyHn=E{y?BFY+$6*RiHuXbXMcqyz(e)A3qkS7NCWRk?kFWz(6=W92;dYmymN z#ubt)?Mj_O3wmt}=ly9lQg|Mx%acfWSXsGA+ViZ;=KMgKl{k8Oc8P9l$?kgA16ZTJ zdLd1*3@KpXsB#;QM~QFR_U>FFg8iG#?EmY(*=aFa$86Ixe46-$fX5Os2@p$))oyUH zwi6KVneph%kii=a`;-5UQU*prA_0f@HD@%5y}>}Ln5d4CDw zo%0kxsL#zmWWp#wn=I5x{qBwYze-0fN6Eo`-ZY?%jM=7r3qOqM^TpL7zJ2r4|KOW{ zuQC2R)n_%J9_qp43dGuC(%H8flxNTO%|q*sjaK`QtZ-w0`>!<&w6=U_5+MbW64x~h zpuD+nD~{jJFhqt;U$V|kIUNj5OMW`mo=G`t=(TuDwdg3l z{S90w4C@HyDjNNBcEsS?wk|slf|H$%{sG%M+3_Ad_9k9mii!XACqL+W(?02Ywd_mO zTwXx24EL3(6?8NELPR-$3=y9IHLjnaRt0}|xyctoo_}fLB+MZ2p7V>-XI$^2P&fcc zTvR+d$dBzqt=Y_2#{* z8g4z!&cmug`G$1E5GV_V};6-cFPlx#>LloGdiBU*%8ceYds4d!A zRj#;6tTo@>>C>kRBb6KJRRw#pU&5prsadhgVR-{R=za|8y!>9BuqAK-j2ui&P*Z=~IjYFZr& zc*{pR>&mp&Vv)#BjM=s^8q%k=2jn03%OE{?rPvco)*?U6r>AX_>wUZAVAwvBGVFpY+pK_AxJlao75dT zA$4f0gY;PeGlHIzViWh>^q5OO+P0v%gs)NQi=md}rIgFomtk`!01>&z`&&qH&(R7a zQjjvf4pHN;uyv%3blj_X593aYOTzS|(fYg-@mB zLxMnBc1!JGo7}zN@VA|RQm6ik5dqGor1$F0mZHD|ZvK72?|gdQE^fh_tk7J~aR4B(x31Dul7%lg%4$ zjg6R{m^Zwo7wDF8{5?A=Hb>D5t}?ZJe(yw}1vJ6qxruq6WdEV|I}52n!+^^XE_^yY zJ?)+NjfV&FnWqtD%MoXZr(X{0fkYi*xmjwTLd%0kh(nZhO2cOR6x9j7+xsIO&A+Pd zp%z<=meg+fCVKD>rJLCv1>36lby@UsqJ3rdcKPDO@{4N|{E7K$=SDa$nq3^6-Ur`^ zlb=VOOV1@C?{Uw%DOSp>gV`ZIG{CgEn?ATKmnaFU87{QEoII z9Y?2Q*HHvCcp)W0872u9=2i1xHjONB-9!I=N_k)Q7{bpUG37(YZ$q=fJQMHg5TNb` zn=PqZ2~u)#n~$wY!2kgGj2Hm!%A!Z~qf+|}lQ-d*b{ZGY*!FA>GVv*BS*H<4O7Cmm zu&_P65Yzg&rJVLhjM$NWuk)93?Pe8|7py38QnrU~@H#VvT_$zw#l63*nN;?x*Q^UQ zv9O1)YH1o|`k0_mnFY!-#d=@wQhq_Rr=0Vl+D5$j9nUSD3|U=PD8OB}WKRxiAznUG zLmacmTUsR3v9AQLQ|g8@079Kn4a76Hp!)ROKJbN~>c+OFW0I6VKVS{F<$@l?*ocX>DP+X$jfkQCJgX|nW4N>nTJ)?J|5}&F`u>ojGW?D3-Et!Z zx%?d99B(svIO*x>Vu5&gOEex#^;FD#n?18kqUDQa`x9+_G<&!>;@}S(*JbxJH|s_> z3WJ$(~-^^K@Uk z`9y5(lu>??Gv8VHm z2XZs1xPeDQtW7P9@en^^`JE>d5L^K(y#v-iXwFCP?qbY&83)LBQr)dcd5#F-cWd@b`f86zxt zJ3f>wa0#aca@>FAX;g;ay@cDmm%1JlJh=Uzzm7@5bmLWddS`4!?tx{(SO?K6!i~5? zYDX!;S-ubWft@e^)-sJJhxg5F8kAY$TSU@v}{{okj;OILG}tD9&Xftl| z!Jh=JjN^Y|#!i0u8%Pa67z z%9#GZ5-j_`5?ri|9qTh6aFQ@tuf!l<@#oG?@a1jrehuxvdw>{Mm#?861pkB&e$vRg z+XY>)k88R1aajNcLgE7+Z_3?>DjV|dD-Hmf&N*QNn|2`}?M-_$R39mS$o6?TxQ2E9 zYq=Fr9ZVd(t^Y|&3@myPXgj>ib^!}LzRmVE$3cv7M$MQLNf8+KQI1IDE7z`3Mk@o< z0GV|kmvex<;jr0a^D%xV^oLG~>cItLw*@xdt{FgWZ2&o2fgr)jS@si5^m`rdS-`rJ z76bT7=EteOK?!Tbj~WGB(?&qFKY!w5dji9qq-vj>Kal(xd_O=0^&4>@dPklZdCRaZ;>nRpD9&NcC;-uQ;#hRYl>0i4u z$W{$lG=C%U)Z))%C*VsYb=2<@45e;P!&zcpLurx za|vB-Hu~a+x_j>(;iJMyTO32wYbA~4LO!Rvo?TPY)X3V1F$0vvBE0}W3f?^0UE0Vh z@_roEq4S-@h3r2w03W*1z%h7=&EXK}+$MmIJPuHl%AH3pDuH7`RPWx>U*q^5$K+N! zJEmav*R5>7My#LvaDD#2AS`?K6`c-@{Yv89!_QJ-My7P@ADC8~%@-cFP-ddnAUTBy zi2Nf!W24?N%{%;+XTNb;m=i$qejn*mgZP*+d}}TBrNO-}!VR&1)_z-||1Y9RE8+cL z<^kn#f(e3qJaG>65&nT~vHZxzt}tBHfOWo~jedN^6EfgnTBWtd&{;=Jr&PADY+T3S z_MKUmeMTts?cYKdzCo2V$Mt^`!KtLBbrVG}xXCstK2o2qWJ?Kz6^+6mANyTC#`c{UwcY=Tds7cFa2jc?$^(hZak3Sg&TvD1u@<{V+F9->sYD4TdT>Ee! 
zSL|u3zSngvj(kJ1{{y8p>lGjn(;%psXeFIUwZ)$0VNy~EIMU*w+gppyGjA9Sw>#sle?NQ1rXf1xAsxkrrH)e zvH995htPZb;o2Yi!o+=Z(hJT^v*_WaMI-X zd1t=j)8eu>N^c&6+v6g-Az{Dq>|w*>E;ts8v?b88#?=89{IlmqE0=N+Gq77 zSuyG@gVsCA3!=5Cx*!snky6401MjsG60uQWH8~PU1I%w=V_08MREwA|0~W3o`xHV) zFy22H0}I2W>(GoUQwW%CPP$>w!@cg9ua+*#cWZVzWnT(5x?#Drr_;SFWj=<0x#Mf* zZyg`L3T)Z{cX_oS`=4Xs5#f>q^zI0n~HqOJkoya+r*AZyWVZj8?cP zA^hYzfgj#))#j997RPc_IIZM2Umd2R@`5t3*~mAKzCGnDiM>jHCZ=+M1>NA0EM@qR zc>b!p*M(c7%{S`@0*|a;%A2iEtwUq?bTcQn*|hI6E>g3*mWa8t{zGhzYqyI@O~g=1 zsmi6ovelwr7rVHoUt)O%02x zBd?Ak-8vt$Q_MdtL^H>!MxFdSBlqu>(-ONUR047mtvipXlN187s{;f>F?cRY+jrHc z1}CeBrEacaG%MHL(QnoZxVZJ>0Q&!k&Ta?G%UPuvD0ZMUIF%R6yNl3&OtLSeJ^G1h z3#Ncwk@RkOlwL^TYHgCB5asUM4#j?&zMNd#L6;F!f!m4w^Vb8!iM0`?B3bC4E`5gq z6MifATbS7#Fwz}>EE|9!u5Qyt)WivoM)KtkhzeiNU0b%;gojOFwl@f*^Qt@7VrTjR zd)}<7A{))IBVM+2HK=G+I@o$;u$w#Qs(6Y}W*z0A=u${2?}LNE?`5kfYb#e-1k2n0 zscbT!Y$YUQVKnJC*`YQk(~FXs`fu%>AFQ+gfzwI?t-V9AfJuqm})~SNV-%}Q*G0qx_Pyz%?Y!c7y(^} zYo!p<)_iI=ROw?twb1?0fmM2LgwH+Pazjj!?^le_xrI|e%yyX2#*9Qdixu(KVwii= zDX#-Q>HxOzG#99J*~b<5=Xtx~;L=UByWTs64@c&P?xZm%^n92`M=;8tZGsjL(JDQM z!}n%M$yIk+4>!Zs4|^``XCFrvMN)r*vj!>< zVTvbi51~WHxZhWK=Q)r-~;rxs5;<9d4{4ms{RE#_tLrFq0-`7{43UY zbBRE#h4~fwf1Fswhkz4e`ZbubVc(-T8$SFEORaQ`O4CVZ5(oDAEas!a41-{0PW zd?^wM&8&eZE#XO<0Alu*g=1Xk;xTUg$DSV)U0yTbE?ei_4%Q7CZrb>8*B|-?xj~`%{fBI>`QC ztDpZ~`h)iOf8Nr7FwE(#mL?^@=A*(HYAC?88QFZa&s$)2oQcYD7_cIUEKzr-+upnE+45q(R3@ za4a9ra3s9H-2bj&CVxDQCWIgw()G780fNgYsIZ!zX5^ol*bc8ACD*=xP|fS1_6H5H ze97=@o;AXbP1P;MFm(Fm?#GWzAGajA0Dx^zz5$#IZm#U^@?CDepy-aU&E&9k`NY)V zU2SrS_$e{bSPJY3BqXkjC)p2n?4J!fP+(fpF2k{sKD`0dG?|$Se}U}lr%JxDHWM?~ z)Ylf=wxYd5h%8HLL&tp-Q~U;p^GGq)iiqd^{&Q!1xAF$&zkwGZTB#+t4&>zU4S!(` z(I_kt<89l|$>eF%U}S(@_TAP$0X*G3ZTqE!Q?L33Z+~46LUEq7Nhx8=7P8=N*p$3v zZ&2>^Os6er00}#vi4l03SbYZzeU7AIyW6k8s)@%=ZxG8+?<@+LKc5@$m=-7{6n=iP zk~9DpmefVF6SPU$m5Gt(d@W|LhR@~RKcOpFJ}`6wjnS6!^%FCqc{Gx>=#S}1ol1K2 zy98QE9z`jdBGo^C*{YsOnSDRGF4%imgX3J49s?GqM|wo9fCQV?R?@Cog1;gRf&{WI0D z;9YLmHH8zKD&K+oI22YPofb0bKH}nS6h67K`Z$(*ubO}p9_EbDljEja8bL!HfcN5L z!HodeFOcE2)hyZpj=ND zS&TXdjj;)OW3(_I7Ia7AAG_MCQgXg6UQq)sLt86JOiuq~3C^r8g#ru5g-<|J#}9CN z2kLmH^Xe~&&A|8 z4@K_(iZihsk`(u?Q7{0XhR~AHf+|GOi_KEiX|4ur!)b?Tf1?1v=2M|E&4mvTD*Oye z8bMGKg=l#)UwuihnPiR{u6sL&d{IxAtm8FFw&C3Fd-j(UCqso91A_CC%$()@d2U?q zL@y#a2yKMK@t(J4Dn(@a%~-stBy(!QkNUcrluDDcj~}$5hsqexCxamRp_SDvt6kB0 zPi!$XZd$)IO_a;Cn2kVa&;XYgCOTS=;=k8~SIMDvJ-&H^M`FhO^pF@J z(BJ@Gqi?fefzfHFhMxYh=HdwjVquVZ-5F@u7{AS_7Q)SScJIyQhQzMXIzT5#!Sn)o znTllmnq5Xi>`cT9N`1sJoNZw#GgWYab60q6E7!d@O!<3p7g{G?*cJdLpZD`lg@BPY zS2%=iR5gxM_C(F7vthvN6!~54HBM`IgGFF(;nJ;32hN3--wBTAsZD^F+r}V>^Qa%bGhcxRDylw zXpbC+KE zKpyz_6#Bl}^cgJFvG`g<+<|<^{!)i}h3f%H1O|>U*8|zJf{;)clh5XEa-+rJgnfkq zYq`se@K`rd|6CsoUKfmG=j#0^*Evl@vhL|7oAbE>TJ_{~No;~SWzffdx_y*>j2xhC zFy~ZS({tx6>RS%5v^xjQ@ zS;!gweMYutXy{6xsIqz6k5_zXQiLRNTZN-)4UQuwF+mZOJXs?s{NoBK8yA;`#5rwX<2i!bGcc_jh(=cF*M z5E}Cb7g-7HMziaqcGMeZ_Q14)B2Lh;BP8<-K0;lPCl`p-_$kiTxYIntAD#$*L1z4+ zbZErq_3~m0%QFeHK(Qo;A_$jh`)hyZ&?1-jtBx>U^jCCaHCG(U?vl1FN?sO_UE?&; zb#7UadA%8zEgGc>M&l+a*J>C80q`Wk2~p$87>&wXVe}FV9feUbF`ur^>-Me!tCB}0 zESaL6A93-xTxKC>=zZ&Hqat#&Z?fo*-?rhsk>l6YJ8C*Z0V_1-ny=EReN6H4?k#VL zHqj`y+!E>Q~`-lyxo zxF%(qrdC@f8BYTEc`YJWA=^;&pwMu(Qps>IZctFd^SAHST7gS?uik(?RJI}mtHP6} zO_c0>5BTFb5K)-@Y`XvQcD$T6Nedv2p$21Yc z$~O_y&Fik&jXj?7q?;!cBbHHN)?W5Gbt4J5fC!sfh~wOr=iT(}Hq^3W4I=l=*?J{X zA!obNKw;cATRL>0k49#coD}o|yQx!N>2|fmE#D07O~G{F^K5ZL4*erE!JH>;y{Jkz z_rtc;>x&D+tYQ*|Wx~Iz*HeOFE;s(6Fe1VeJNJ`0~*EI=wk4KX!| zu7=?5!A0&0NYqe=Ho@dvH`M{R`%PixG@uiE4n3OUwhduVbmwd1)GP`^pQEhhTx60x zBM?kY@jxG=GFlY^4y+1h5ZauYnJ|?MG<)8fLxQ%B*yNyWHoB)=vYyE8R9KJ36dKJ* 
z(I~<+3LGbb_Gz82+UV1x2%z-cT>eXCXiyG_Qk0UFq!+U;XbhDK89KS`LWYOE(lR3 zyNyPyDAcFV@BW%Sv;604LoNZ&b>2Ml^LqMi*4^l-u6lbq~{hl;MaE+dARO~`7y)lr~AJ8nzeM{i1KAQ z6lHb-oz6C-Ip8=0+U#eijd-5f&SdaQ$D``!x4e{`9TgXa?=qiJMklhslZbqBCmbjLNZFBeVqU<${A~;W0|3DIHOHPH2NO89O&>^G$Gvq((sD4J-O9snuFDv%VF=xEvTxBeZ(Pk9>w^o4borw*gZaQ3#JP5Oodp ztK8IvRik{l?>7h02-BoKX=u7C&fj4Zavks+WX#4d*EEMgr-Y-n&kY5o+Hsu34AJ0h z);uW>(aLB}RDo&hQRgS3<^G@c-aahpd;cHbK09YS+iHDwsx?vTY@OAXmGcJ8`*W(< zcJd}?YAWC+c?Y6X!W$6SPODal*31+Uskt;oATvZzajOZb+++%G1%b+h1cell0MQ@X z+0Oa=uCra|`d-)fy1svW_zSr7#q;@mJf63&*ZciWi87XXm0C9cgxz?;c20xP=U20) znA5Yd%Mbq;^3j}3;=Zyt$7Cgke|{`H3_l~rYkEZ!x`?n|9ITlDE(8L@ZH;)^Ft8h- zNw@qcQnkqNZxi2J_**JX%$_5f$z0qY{I)!wFP(Vt>oxT~&qRhx$v=ob$|kPg_T5Vt zTKleOax;y3O=Ry9bGp5FV4vqwceD3Ja<{!zGqM~8o%ZVU@0}ckj!Zc;fnie$S)T)2 ziggJ2p<%$uk&)P*UP=!@%(SC$*hU+jbyY%aw7QSPSEkXVWwtXBg!^BFeY9Mhysd1e zoe--8xof`b<^iSJCS6~+(|D!^orW*NBBo61MH9~;d;6U5ks9E9RN9JaLu@`RT1jVw zN8h$6yG+=nIU9fI0W09q7Q*b=X*01W<%_xohkn7`l&ZebyL4wnXzhK6ZV*Wqxzj`2 zpoFcb10`fCwLnrhzk`!r&ieE|T-I=+2iy4|o7 zP}t4f;E^CQ*FDR5ak>WBmhOmqrkcJJyR7xS@v4StIgkc%_LNP!pp$Tr($_!JcCPG| z5q;nO#mpVgEgt*^sjR{8EAsh!%b@Usuly;2_}9K^(L|Fs@$rv&fB$C6>&w7fvS<6! z{EDlUU)4wzp+kWU*fJjfCl&fpC zGJ@ZRClx?&O6x{F*D#03s!x09|0$eRFrvd>*@^GL4oFNwr3-s*Ef6@ zfz&mS76pZ*V2wq%jYc>D^Crrd9Sd{YU`EG$kO(NWs7AVpw^;2tk<67@olLhFNSldB zn`5i!Y-2aCOi&yR5Fbyv=YN^+w1pP?adIZxrW#oCQa(P{xdOS-N@nR8Bi)+bo~d4p zlrkoQsx*Qna{&swQs`F8eLkMOO8IE*?Y)BR$L9K1MUbx-fv4{TztCcT-7e17GBtlu z&F6m)d%>^xS?28SUXuN%g|PzxGt4MpD}H6cJ&nXUe2rnIQTTRu*L{zn3uLTuPbTyGz0hImqhE!upG1sfInKr3V8J?wJP1{LUt84=S0i0yw=ZBO|~< z({xXfE!iR1(!6skE}OfprkS3PTUj#gRE~vw9!Z#th}u>*nu(?pXW|WkS1bW@uV{jL z-xJN!H}sBY% zyz!UT<-Zfq6t3O+Y0cZgI~n2$`cA$S^viU5%Wr~U-@_a3yLb+EoUu$Ad8N6s{#iKE z6=9zP2;KDS@s>h%ECJy`y~t(G;cfNdOE~WZ`pIE}U9ixc>_n-&u)IIXl}X%=hC*985K5nS*34-CaPQB-XwC&&3Y?)?&*KtMX0x z-@x$M*qO85^Ov7+^?}V+aT=p6gSYs^(Hu)$uk`qNE#8ycqK{nKRvg$)(?xxzefL)1 z=?%j#ek@D7m;ct!!C}=PyY7{9eoeICpK)~k+P(fQzXoN+&&ke(!HKz3 zzZU!9Yl-k5FqQp3C7Nb;rZJ7Ei*4?+Ym=J#>=CisHq2sAKppy2qOExZ=ySd@LJPGn zoI0SB+pMl92QG`Ji7Ue$BP&b!QjaiR4AlD{{Cnp_!*8KueNqk?VItF@N4En{mK^%2 z^v9t8?0-Ri#mB$i`lQFixfXwlD{ziQs}>(JnqXXqD$u)||X+dIr-e|>KL zcfOXy|2pVKTL&vr`{W~d8c8I9zw$Hhf@6OJGo6$VA~$X!;laKkm7YJl%+#G9YHlK$;ilSf>$N!M({XD|-!r(0%DP&8df$XqWEx3g%_^k*Q!lGUROF%OD@6Y%Xl=P8f<}#Ig$IXO4Xg&fti*VjhMbc| zYfAO95f1IFB8yOHm&Zqp>v1ZPE@B)_yw%_>H{0!+X;l2-@gP*Q=(Vm?mP1J;U!VE6Ev}EvqT#Iz8ME{MzlIG!ObqU^J zf3mOzByF0RKiwuurLal5h_rG*mU;_Ms{V?C+|6bCC`l7lk*J_Ei!gGZay)4}`oyVq zauz-v3?n+**C{`QXWfy%oL9#U(RVVbGuF!{7pbX8-bIH?u*&!NL!tytlgV$SgH|G3 zIPcDGVm^HPmk@dN{2o`i@ZbBN?DL;5mHf{2y#x8|zdgS^>ioU)Q7tz|lsF+EiRrtO*^I_tpVIIT9^l1osn8aX*IO8FtWSiWbnbT;}M~^GPpBl-7K9ejW16V?)6W4#_rY)3IG@6I+Bngg`+dojrkTI1 zZ;FW_#hc@MCp*6C{wW5Wu^BykIk02{N*l0Hl)LQc^y?T_Jo1x0AgX38)v-(aAM;)f)rH^F_B;qg`=H8&#oQ+kE83M&$W`Yj-}yg??^7?%TG5 z`!C1WsLihCcK;u)PTsq#3)<*PF0-tM2Tr{3sb$LoMIdr&lTJrIIn@jW_;x#I<+JPy zuuUhH@s_@oC*xNimbN{?{%ptJNM?kNS}xH^jR*@rz=2k}vMAo*y|Qo{#0%y~`dAGP zb3Q+J9TeKk{6`=9^?(0H$_hOHFICIeu&!T|{=^#Lp2y{@=e>W8exCnQcs+Ua`o9U@ zt3&5sUa!31s)ap4VgA2-v-;-D|9-n>kN>s9b6WhrX9w?{4a?rr7*?s+ntNsJ`rRHhYsm+_9Uv(1F?kCOyP9d7Vd+E;l6sky6*{|n z?N_#}jsS|X7w)Zn>hZYnW_tXg$g|OJQ+5qPPjnmv3KRXT{^rB*IPv?9xD?$enC^3p z0F5=w*h4#~HnZ9(Utvv#AbJk0#q8BQRY`^dghp%7IGYKK7x#7$2-wr}^Wo!FUgbZK*ra_nCMWO z%%QB`-JDgV-NnarGExUR@+WVxV}?u}YxfKO;iDJ-HKuvLl6&dy-A_7peS$uM36j{r z4X5M4O}Sso_ygwaH00T~q`m8DDJ+8AP+yujTQ!O5iZR%7BMbTu8u$2L{-nuI8|1>z zzzarK__6rFU>L^@)99dN#i=7<0)yo`4KAjmt=yU&Is|JNGOMfm+nW5&OVZj!kP3bg zDRW#YVNQV$3cx#AaRyzmKC2f2HH8&H^KUX1Pfh5KYy2K#a$OoAVEFzh`5Fk26RQ*2 
ztHTG@?#TSK@Q2Lm@_PR(e{_%7ZazHrS<9UNnMl*;UXOr|EaHKf5=@#r0TDG7cziP*xM+P}_o^vL`a#(WB;H(#AK#(}(DkRS<3+RQ zOgLKAh@4T*l+}02`8d^G*L9VyS6rBNFS zWIgSIjdVIU%5-&^%8B8IMw|9l*>@?L5H|nvh%qj#>fRV;+2fP|u{mVcuIK41wYhK_ zQq!5K3gKSFAv$HWPC03YJ=^fy+6Vuqz<5O~VH@6AfumyI zsIm;|xPP3wnfSO%J%o+6s1xqz-L@-bh;|b2RHeE|gE3f4 zk0P194R9bl5F0Cgl9DzfHQkcFx@zGE@2*Z+-ym%Tt$zBl=5^mAk*B{&AIyk)Ia)B0 zgsS^cc4@e$j)mKm@5|)59&-|RRQkOg(k)>Ls@GlR=>l%W8Z-PgTIvy!Bmo|jta%@@pwkH?Kr>&xE8F;%9bnKlLOscTPBzHp;-3P>jY)i z?^Z?sVt92bek=NwQ+lDEOn9A z05pvAEe5lK*O|Hr{Z@b0P;P3o`V6m_zzmYY<4v+`C{R)<%7QW}xfGlNpXk^x2Qz&f zOz>^w=rdCI^V^UI`mGj|_nur`JI(p!mP-HQo$tV#>QcRb$EeCw_5@N-LKy74 zmE`6wfaUWLb2IXBL!;kly%|K81@$yp@Al}!0<9AEPn9coHpCE3%@O;Y&t6&nq?zQm zXe2JgCUJwBdXQBa>Ro*cVLB7xK~~@+5N?BRG;g$JeWLBIO@UV-RI&5RB{7UQOY$V zDrqnqaWM@VxalFJAsUR>sPk(kG#sAIW?Bo?lY^c^%eUF*TEc-NqKWbJLDifyAK?s) zY?M<%k$vUvEM}WXolzPkd z;JR4yh%5xgv>6R4X{!@iY1H?MdRNx9Y%W$eBkz^k_r*;-9z*AKHzAJ+!HF?Gy3tIk z;wPJ^g{J#7q|VS=Z|!>IJcZmGk7pJr4a(%V^7`a*;+@n)=V6WY-h{dU4{auB%XROY z>`+^h3Lq?B<|8MhwfD$+Lb6w0JOocAPVcNavHTu$QBFto%4vo)mYJcGS~P&9`xe?* z$IMZw3&OPQS^=U%T|m-V(r0=c4Z2`USW&bD1H(_IO!~DzSuDpGAy0j8nF7KL;+zRH zh)PufgJHIJ#vuDqDaePPc2^qdO_fT}?{yGXyCQ_ep`9#h*ot&SrZM!gBt!iqY{)_e z_Q4gxp#DU|mb?+Jwqv@db=s*+V;S-y0$C&cl;*S=fm5*+HubX=G^S4~TRg=Un8W}H zYgT~OEB6hqXBsBVsEgz#BrOA8k%l*W_|?DwIg*=dlJx{eyUxm;L{l(&WS-Pw`H02> zCuppS>b^~ zkS%JIv?WHTpG74(!Ygth)&WazgHY0KD(z*O-pS)0%1Sjoy_TB5v{sgf~;>>rU6V2o8!MJKE^r7*ppWn${JB5AvE70%UY&3p<__;mgstE zy*)+f!*1l7Va1%K>dW~MUUQ`vtEbK3+FeSBvNTNA#zh5L)8-UsVn$nNL6boo*sxO$ zI!EuEY&UaM4tatGj9w{3nv4wY3F-C`FFuE{%?G$HUcTm4nfsI2D^0mrS^8_CGS3(Y z8x|n)leIjS+{F_#eGyT>mh?WtS%Pk{jipHCVMh*8l{TxC7e}EtAsLT25P2z11Ktld z(su|4_S87A5_;n7HLqFSXUQYD<1h=%D(}W*_Paa*X>FHFdjxLXC#KFM2K8Su@0$zN zsjMER=`Ml#4mW8+CXts+Svz?^CnjYr9pv{;EFMdDrG$j4(n>FNT34cvOa}@-Z>UDE zboVvm6R*WMMQ+l8-l8dQmGc1o7eDDBhuBYETDfwG7zJ`(9B6M#pm(LnGQ$nMNz6SH zR?vQ8*Zna=>1?hSQ`{qWBk$NDeORC^B+R@={ft8nr0HR=}4hsYvi*=}jh# zlQJ2s3(Xn@$@IJV0@17e^Pr9xhGZJlKhJ|q5}1u$*RvABS^b!xq*AGQqD@+_9j!fr zA}}jt{1g+?e^J_oW+yVFA8evKUe)wL8o%oT%q1D+O{I02NNr75i>^5BfCvm@|NmP zW%Yiv~R6)zf=)rFrtK7&Id4x*YhnU6EwKlLS0tXgnjqfPSrCPq=OM z2Cly!&MHnz6YfavG}1G(zulWT{{FvL<-*z4xm-RX@BH{@nAErlZ5!Miyy3}Kj(NA? 
z&%SwnHG&(_D$+#yjU2K6P`RL$0zd^|Q-A}J(U}1aOqH3eQImNmwAlQV{`>2|e43=C zj{$DA=;F}(eW(dwP!=_XB9U~1xlM%Y5jKnMYo}exp4l`cwC7$+RF!yW7Ur z=##)dG~qyT*^^Pvxnm{-eAa)hvqF4~v$@|wRz6`^5GvYwQKHx6O%Nqgo2m6{;!jp& zFjD$utpVm_Q*R;aAs_hf8kmN#sQf4AOJz=;(BSu8X~np|%b)p*35 z)bzQD&gLp}kezc3mg7e*hAD4lzv#8;@JU&=Cp#b&HMry4(1lq;o8cxVjdrMxgYXOt zhxf(iH<0crQ=@h~QCyON&gD>xR^B?0FdNEY2W0yh+Y5J08gc6jhI%9RRrWRjlFs`` z@FBRkbmY%3EmRkNg{Kf)G0T#eY{5dB=jWSu; zGN$ytIT$BLOo$)DOx!Rz6PIzWb@3|su&-an@x75btZke~wllQy5+v<#W@}Ueg=63m zUMBy9!0~CAUV)ZC0fxix2<&aH!9X;(51?6Vqwg2>e*Rc7u{q!iYE^r;TRaJ&WQ$(T z-xiYSB2jBA7}Pg!EYhSHAdCd0AMi3t6`SoT#dPmK-5e$Hn&pd{GE=kM0J(GgcX@2` zmVl!lf#X(D4>-|LlyRfe<&{>@ZQPXNHlFnktbpsB>1+qg3UqgE(`I)Dx#l5O**m+H z(-l2-P=M0jf4j4#TN38WIXCbPhN{b=-uMK0CqU?~oxP8hcu=B+ot&saxpgeE`4h9B z_{h&OaM0hi2Dz9LbhUPV|72S{a%8-evjY%eHfiv|lxJ_q4=GDa?bt^nfst06}$ z7lq^@!r}T5<1u+_0)&A`DkWcNOX4dNHPcF?*)QP;3&cy0-q^xpYff-Hqb>wxA##xc!@lU@z@AE+y z`}(^ddk$H2Y&f$PE~&QnnKvxY?K0LPAvzojIYjI3ksoB|9V+QPXgTM|5ku#|%=9%m`)y*8nla)TK& z;RAF@9Cypbv~H9Z1*Jc-{88p*H286g1EI>TgX|JeYe1Ztp{z^VtND(R$s|1(4hA=I ziaQcdvxIu-3NfuKODUp_f()5lL)7(IcquQ$Nys@5lsE z+&C}Roo-Gc$ztz}zORKaYFhMNaApD38b@6$&bw7}9huMr`wrVb^1!vrY3Z~Ylx|s* zHyKKWe}*S|m8lT^M}{C~$D1>&x}_iwB*QHk_gnKPirdpdm&ZZl>+@a_Dn4letH?q!+O}YL) z%*&->KmmZ6Rf~M4fbXYw9fgM`M$2vMAr_gwp|vj{7w2M}%{*P8s#@&8^txa1J=pyf z9ShCx+pztBOh8u>dV68ow#f>v0OKSq*aB+$v%TB7m$iA1(5z$SapWc^V=N%wXCvzS zqN*FzEFX|3OAF^?3>eia8D||Q{5#+5fi(*cAz4TVPj$q|)_ciy3$@a*^}~T;fqphA zq1l2h%lLa|ihFaIhBXX-r%`SZ_%K*DMPsF7YkHV*jh|0?%t1Dju)Pp( zJ%wPOmNQDHMit+%Te^KSQYivPdO#jDX&}Y@I`R&iQ=$AgdJ@3gE&#}FP%944p!*2X zZFLpEa9wLWEK6R>yBh4?@_Ae9=A16YB?}15wuGoYu7RLH~C>HwJ`n z?jYxMj0TZV**h?dbHL#>zkJtP1QnoTfsEab!D*~trVGF4oTZyfmY|l(zcof=2W5o{ zi9e(`U(7iizH&srI@(xxrH7X$&(siS&=ZfB5$py#Q)+9dGPpJybUg=ypp?ZSP#Q~G zps+mN)(RHdu-p8Mgw8rsaAozz@jwgYT_E02%FTo2Gm44I@sFs;GN@Zpn#EGrA*Zp> zrBLR8!8#u4gb1A;NKavJCi7uIZ=|cM>XEhe7XcJR2vdXwEHbhL!Wsr6{7fB) zdc$?1T$hH_25s^?H`Ne+NWRt{4w`~bnoEm-E}G9(Kbh0zkfo@zVwlVSP!J20W)9m4 z#1Z*{aezjGvc7jAXV>;lN^9$MLI3@)!)%zlvkiL~z=bzMDD9|<^g$e7ASkeQhyzQ* zB+~o%$-wZ9P+Vt2bNjHaYA#om6iwBYQIYfL;wZcJekT>oOUrv+*VmL9%T_3G}% zTxN3At?{?t*e>2tH@yRye@c8WPoP&@dUV}+o`5q>!|s)p0i|vAwI=2LKmt5q7%!2V zmy-W5OZzv<)Lp^8B#*o<%&9%+j~MxevBrgFt^tf*Ur9wXkfWLEEZII_SSnh&oJXPI zJA5-!>9mcL?I|lB&vYNRll#{ttWqLi|K^PWAymHx_~eLdj5SQTVa$rUdywJCKrJRv z3aT7*8CKu@oDRzn_Z=Yi=x~ zyIVIn(=4A2&3AKm!_%LopJ1+&UZcI-S`Z$cB>Y-CnqooS4Q2))v#NXxLAtVwkql|k z%|330Bd9^F>OWpjl8pc5(g)-62WyO~LX>fBSUWpz zO%=KrXtuqZH(zE}cA>&7Nojq%&1g4`eE#;!Ie~vs24z;=s#LMw--!lCCwvYc+c7i1 zIv{Z>oH>>{T4vjPgDF^Q02{zqrL;xfCc)=-Cz%z&Rrf#}SnAs7qcnzmZtU(~qB|0i zJWLu`QY?39>nhG7p7J7bjObKWA1oCZMu#m9b}KQ|4-u(^vs0;Db{c8KANEmQx1Jsi z2jDu00pu#7Q#m=m>yF8vMi3UoxuU&{Xo!1fUPhgkaZqoJ_ zQ^m5nnl}w=1BeUBsn#LIjBi75 zmnhXWc@|2za)k z^d$}EWgY5wuT#GD-PQs2-8u4XQ!>Tr@4Jyg)6*^PjQL$xz zX9^~qxmkWmPbxpzPyAD1+LOwU?I$m;{E1RJwOR(_SLO!T%p}tD8cE%3)TOYdYxR2A zCsY<6K@xClQu?9Zm#o42twM9?A%=D;xhD^L2Zw7&k&3~pZH!4mXT|x6JCi)7(1@F< zb$lXe@o~opJBTc^f zC$C#DdhChWJWp#_i*>-dni+%B@G=ky?ae;d4SU~&`I`8ahZQ0o>L^k1PFoj4*$osV zFCs8JUnzItT_NFi!kZZ{%1`|`vfZdH{~%UU(bvAzJ)Ss z)6?bIGrdcZd4We#XQL>}dEYpahKYfI5#U%K_YS96&5vPTEd%E`AhKzXFf9SZ0rw%4 zQw zC*>Vq0c8ty53ugzchfq2sE#ef56D-24PfG%^ExLo*#Fce#3oij> z_>Ve-F!)w)?X~h)A7Ah(v8>E55MA*qtC3G~-2#daVpRXnzNN)1OJWR{s1IwW*olGe zlvzazC9Vce1hZ}*_?kox~ENnoIL>VzWVt(=? 
zgBe7o<$SC76Bwj?dp0vY#M;}MZ{Cs^IUR+|s|n1+^{{3!md!&347krk=ch9_QgDd8 z?#stU&;dXJpXQay0Y4wm&v)(9xP+gbpDn*xDTfA!iP=olo4V1Nx-f%;AXfs(JkNoVIgP`K1a z$y-fpTDPIq$hT)B0PiNoJ@;DyUt}ow^My*d+t9tuL7fE#NL^DvR0dN^;Y!4Y-9A?_ zq3RD)w#+jWYjo9zTsA{eHRq~ryY7!h-;~2$j*eR3#S{SN5lx~{ozyfpF zOfB2dRom5@h)GFf*EM6479UvO-g>94d5_D*&nsED33>ASA|!iOtD|5aDw%Ib6^REn5%xR|57|;(%y+uAGj+0 z9OGSsW4J2ry_3c)gOomrwuV)sH9b%AUmU6*b)u{S?RC4r=6~4L5FYbm66$VLWXxXG z{Fgi{VrA(Ggy9;3PcGXy+{0@H_R1ntkn?40en-Gs%P?7*mqB=q1Vy^~1t(>C&u5&* zAVOmzQc=$Ss+B)H7TG2~+V9!%0iBN?XJxk8H%}dZ8@ETeIX*8SN7=9$drdup~9`u*72ZH;4-?Oke77m8rKHWxA1~>P` z(H)k%XYmkStyn#N96DdoGFvd)7{(UxA}H;LMb6E>9`3&0I?k7_ql=w&?}hjMKGgl< z>~0i9)0rTd?GemZ-@P5#>gpzV<=r&K`b0l>`W{zQfwghI%=m#9eRA<@pYF$IdX4AC zx{kQ+x0`Zq2W#6eyhsV^OE$FMXz{v#f#J+6W9oux4Eu~=G$}kWCSprnBZ!*BRcEU6 z$0SYqPbY&Z?D4>JT+3Q~F?*bI>(_>v5Ka@cuLW_(H(F$OJvw--YoR1no!|f3YKQLQ z8%d4VWX&l#6#&M65nQsT-!neddY3P~+V0pqRaVh9XW6w3T|ODNd{OG-d~<}`Hn)xB zAq`@N>@h*KU?JXl@o(U*-7&7A5eVTqv=q?8)Y&{>QrxSRP9aAW#F8TsVXUgGd59Iw zOj(KBD}5|S((^Z&!*AnWwfdz2*9}3J&4(<>ljaOkx_Za~@Q3#SN7^bniZ&JRb*MGj zGtN`iFM1t8QuL@4>Dy3axE_NyO$D(fb4zYX$U;UaUDvHH6*~R&v}@yUL!&-*Z}B_4 z+tKkRouAV_K8o3kOi43bD>ZZ(Vuo0S7t_eR^;8w`#9+2m7}08PE9W65EEZYfX@wem zq(oB|Nm>IwSL$s_b3{%;4zveL6mhPIgCjP!Teqn1Sd6+*XU^{W^rMSV~1^hE54s5Otu@A7TfHw0}MD>a~iOO((V{ay@&f?wmnmt9u)ZX zt;(O+1@w|SZi8W2F*=LPcJeEjSAj07<|b5Mo}Qz}vPvVI9=nm#4R*es7b+nLz9v>g zT9UWd7w;u%wGPft9r^we@Fx0Xn`b|Dh;I!9;fN2zASQyu>|rt2mFuR0TjMpj(ijGM zB43NG7+}x0HJGqEPa27Hjh&n|vM1VCc$2n+2Ji6UJ-!)qf+}JbeYF&0| z6Ma~dww#p210z1$F!@6OLb4N>Y+^fnCoU-t?Xn< zCUO-Af+T@%>8(IcU{Mwq8xP$rPau6kgwdyd%H?;~3dg0jH@*r1?*1vaiv|veO9BH} z$m#3$obNL=+4acx6NSkk=R#`mrhNa!<=eO}wXD-wk04N0nE8#xb5Nitpw3*eX4wJNnyoC0=wgj`HMw6 z$X7YiETr{-_Mzbx>ycCO^?Er0<%H!9h18Ua3(}gHh|nS`0-^(t26sx3#mdQtIl}yT=tkoW_S`;6EB4{Mr)CEHJQTfxah3MrUyrKdwePtW_zE z)$(KHD|L;0Eo06y#m;2PPi#z>tB@ZRMdsJtFc4@aiydSL)9*u~X@7?MX6T%oQ!l(3 zY_FS&-p&E8UDUU)8x@_czN&aS_A&zjUi=xmD&%-ig2oJu(3$cuzQI}W4k&P6BAB_q z>#~*sHx6y8{xL+jWm8oPUA_awck_zJ1e{8Z&~?Sw{ELxlu1Gn$*%#AG`{KEpO$GYp zfLwKJ0wa37Oetq?RgQ`4%?Q*ZMOQ+o^f+3+vLmmKhe(@Ck?H+PjwirRVqmEPxWT<& zi@)3+o#cK6LI1H1Ico73vNhV<=4>}3eN5Fmau;1AfYa%idiH*SVOQssV~aZ0{)!$! zBP+S6*RlLshI>mue8!c`nD4LZ3j(tp5YkAwI9TY+Wa?@mLt2KtL1O@$E{^X`@VevL zoJ;ASP<)Z#&@q1{J_t5{U`2}fQVD&M`a~5!Llw1@;YX> zS^1RLphK60ncf>EKFeQHO-olyE0l1DiI}y0C)Vh?evD`+*0#&d!QsM)m1{(BvSxX) zxT$YtWE*v|b-1o&Wody3r4BHpbbj1!spDL?I@C$TUOs^MaeYqS>bc5KK8iWW-2d6% F|3Ao4B1ixL literal 0 HcmV?d00001 diff --git a/docs/imgs/ymir_publish_image.png b/docs/imgs/ymir_publish_image.png new file mode 100644 index 0000000000000000000000000000000000000000..0795b8f4b3c222c64afc75940ed35007bb4f1984 GIT binary patch literal 30183 zcmdSBcU)8V+dhugfoN4KL#skXWtaeh3L+ydhzf`xmOxlhFdz^>WXVdbf*>eZ37{aR zsJIv*0m3E@VkE2@$_Qa72$2yG0)deAeFw1Z=ULxAzxDjSf7HHCa?Z&)?{VMPeO=eh z4f|8azm@x5PDV!NTdNaC9b{xy(Pd;-ie%S-PrBWbl)(R1gg6{Gmnmx5F#--&`TS)2 zlZ*^8b-nQ1YH+;n;t52EjLgP2(*IV_f}Wk1kqL{jI{MSkmpwQlN!lJ?OxN6$lSDQ2 z_`C*-=r1~ZOz9zG3Co@BI2VO{-iuP`s+* zxQgtRPb%%XGAXatA1+p?zq>kg<++t@ZZf9kgX9&(MpwL7n17S1^<*q9-EQ^_)6UVU z-Q++zf*)lW^SdMB9-_VE)>!wP8G@_veOH=K>w`nB;t#1e_1qAnPwd7K4bAWUdBXLB{{ttS*^7(iHzIXfXsXyAGx7Y?;J~2oz|^OOg{od9WC&5a z6UOeOh-dQz{P62mng_nVP19}fr}pH1N2wgl2RZpNhl69uoJdW?o$eUzvFISL0JGb1e#DivzS(mmlMU(IBKRt&YXLX~}r2x+;;vz@l@z>2r# zqATL`zmC(}4ZbhpNu{qLRx2R&n7qVv^;BdYO^`;@h0x#H{QZfZ_-Q5!e21K2xB${a z=qFnG<9S6S_p%bZVe+Xw{rbP0J6sk|a;UdXOCW8gW$+-DrKQ?P(+m&0fBU$R&+Rh? 
z+SPNR0h5i_Tpi>*&$(+h=5@X)UWfWR6gRgq#uexcIl`X!=ZWw#b3{~%c*ZF)FSc{% zoBjWKTFt`(-K#$zCETsZp%E%_67t39s)tYtPCt-F6kSv*(A>T@7o89}J(oR)6}oab z{qt{;ftW84EQNndZ0617>#7)F7ox@RXn`{3gK-v&zjISI^7G`^vGUgEGIF60e_oYe z*YyDYYb{+7W#!v{P{cj3fp7?Cc{XZ(OS49DK)+ud?~AS>jQg0=ARO2FxSLEMBXqsS1r5hUh9&G5m4DA=n&e+KbD5%9h+1_8$ok3S8+NWm@}A zIXC~yf{lD2N)vLl%-u&cXOfK<4w#&xa?qBYF=9Bk7XcMy_m}J-z)w|w2Q_!8l2C`M z+cE%lnD*=uZ!#6Ct1{ZvxiQtkcz)cc(>2A21m%jybd{+3O@GSiD&avU5$w&jv z-jw(&Vg4PfN(ZD@5!@g<`d1Stk;;Rs4lK{py+5QsJ0^$X_(TrrJvVIpIPTiq34dE0 zmfHKE5&NYxiy}yEOr^$rmwEb0=T!PQukc>Ni|{bFIicYz2-a5A*=boMd)B|Dy7XZ| zuVFk5>^-sYK)_uO`$8?tba-+j9hEGbK{(MkccW2L`4>oatpD(<0{6m>_+kp* z)Yp;;7D2Q;ZJPCU89&(%!VmX|1@lF#u&VYo+Q>j#wc1v*+7B~vT+%k2nVgDBGsXIr zb$Ys1XN~LX>JZ%`twsiBn{~%}Mi`CMq*9I=^KC!ZlVr0ux};QpT!Z4=9#og}Sm4!Ec-93#;+a6ESAqDKD8_Yk*RDakp7enMv^olSqvlvOFY)7OG(VtuxV>M)}DG=jpj0`9{4oWtZAV0?&eBCq&rr^a7TvtRjp7Z6} zhqKpqKV^nmBBHTkA!l!PaFgdyr`rLW?t(+kHEE_ zbc@0c4>W^8DZ)!r<@Vu%ybEN7R(Kpq?B(x-E1P4}$eHN3dI7j(XM<={t@}f4gXLil z`6A5%Z`@92*a{-uxv}u;WJdkkZoJ5kPZXEtf_2dVb<}$ zha3!eQ!Sa6`TF*24W)12q=?cRVZ6!y9jBA#wymAS9Q}q)2omGOPDRVMq~$6ikH?=G1Y8!VeN=X>09_TfWci zi>iP+_Qah=Dk_B-1u5KUlbT8PNwXw~kB0j1MkqHa$!kZ~&6IoaoN%#obB`$pf84R0 z(C$7@v47iFQ}pM$e7m)z0X){NzdpFT>lQdg_U~c;|A-T6?%4C*ZVx(m{P=Mj?r?UH zL8>Zjf_J4;l`bA@l~n!-Kbq)~dvgm^8yH8S(O*8iVwl9t7aJ!%kKCDCAo>Z*OK6@H zbz*;SY3rBtd=27yPYx6I;Cd-oY(BGnWW-nR9d!?lMx)Za28eEGV`Jm7iDs%9o6YX6 ztrQ!BKRpCb=|LaQF>IH9lX7;E+^{vCpB!Y&928UCJ z+0ZzYN}b|YI@!wkPPs5Rp${rlS#PS?s3OrW=&B-@ExAxNgh{KaWJ)!nRf;U_;ikCQ z`Hux%N zX8L)xZQusQH%4>EMRWZ@6TQ`)CQ&mN9^Hr?yc_W8mwd_{D6(AtCvSSZNAgE7$$rTg`F}s&(9pOZRkiRxX&P8LV^l#m{FA zAWX0rm}-agcg^DLG`|5?890K+o_T-V)$(DNVG8q@96b9v$;4wQEyZfUEl=dRP@owd zX)b-f{d?Li5N@XsayI$(b-_&r4mC-dj{cU2#1PAvF-XqtO(-i#C1UBW*FUAllUmIL z1uT-wc!{lMvTXWF#0dyp&7R8pLdu14BM5jK(~g_&Fg}W_}~>$EO~(9Wv=Xcu0nskJ*XxaVqKWk z6`?F&Ixvs9Yt2dnbC_<95O+@_!XAXt>b(loaVx+D7Sq<|+tB8VZ-4!$rdLp$KKS(P zi8y`Uv48xbcTMhifsxUXd!qhIZrkAS(2({oAJZ)CD`98rOg2$S1Wjc07gm>>y<{`ePZUE;8%G0~D ziA9>Om%`RScd82}7jAt!P-Am$tis5&*2|JPez&beyf>TMDbId~*C6BB5n#@)WPliW z(-fy|_J?HrQ@BDSETQ0Aq((CW-MKzN!GJ5zhW3t z)*aBhuZSh@|70tDAgJQLYCn>bzOqy3#$_Y!8>XU@xxAGgXygvnU!bB}*`CrF#!1tV zrUfF&VTANn@s%$z$zT4d+;mr1qsh%?ydS2>I-pkU>QR0*i$dPiJP+QdP^#@9E@9 z@3i#}z(i|RV}9?glGj70CwlGT-C+4Huy5Q|Vx9TfhB0LoM;4lhxWlY%>R&G~N2ol9 zuYO^2g(_CX3WJR!E0S2EbX2h0NQ}s8m$s>0j~zRO5De>o`$ZN7e&gkBP`WHkX~h@c zzUr*QOAW~Tkgnb$pZ3}*=9VBB9aUF~)Y*g*g}jwTz1h38X-Q$4$v>nM7U)598UxO9 z&hi!0iXKzWNgjNT*vLAKMliwUai$JUuvuGGmLDrqe3RsGbM$zAmm#RS=98>IAdW~6 zoe)>T3!S$}D>G>Tyng<$2vH)m~kC+uUWbI0I*1 z*%dhiqK_O(zS`VGshs;R!`wx4HL+L+H{_t{)wLW(Joe!%d#XTOd}o0;iE)d$)j+Lm zs}qeJuPmLc^4j`!%rfOrdRA3Ab;J2|2hH_F(*Z}cjo>ZEr(v8WncKtGu?)^Nue7W0lxo=t#9gO zH?`z$@PG>}oD@fg3QM9vHzFb%6IFOu$x zIQ_Gu1Ii1@%DPq2V7cl1n{Ox>uu*gg9noV>-_cdfV!BK&e>po8GdYI|0ssLY5q-eX zeB1so=_dL^@3Vb)Xy_k3)e(eBtOOH0Fjd}zx3CAuiKeSL^;jP zq_00?rIQZtMr=rMiiO5BeLs$@anzQUeS+A~o6 zb4+f|KI(cguSgqWpDO1zq}r^|;Wd}jxOttgqs*DbRKHfr+??2h`)aM~!Nr}fD*mV% zI9YxE`%!7~{4M2uJZXl1zOqY$*9(t(Ci}Bh+zW^Mql6i2+7Qd3Ym`>frJGZtK=E|l zk%rHQ<#pEIve*dXcnQ9CklBigfR(aRykUmr7^pW4ODn-sdB5;EdH6h`iO&?5WlZDy zhM2G>&qsbdKQ?|oJVx@%A%TWu1}|G5P_0nXWzD}AqApsEbI8rztTwOSvL=s+Ve5^|M9NO0WK7nJ5C3rNXW@?j+;cJwLcMeVoa zfrH55Rzi0E7`r!Tdn-XVJAXjZWa?j}L&zT5?ak8B$x5CEi+R7_qYbDL9?6fWwgf|w%a{rkQ3CDh33djZC*v#VVa_h-OU zEjJ5hk&;;|N8xw#@$xTI(Hw5gTyTVH7pS?K@b2ygovnDo&P?TI%T)x%V_BZCs

    - z^QyAs*!vogJT#?`Et_fip;PS30=0Z{(a&N6DvHBHSkFk<-wMs&b zn1H-~Jjv4v>pH%Dt}Ud#b4EB)mWUeLiPkiR`Jb{IV4qW|3M7ou&C<~F?GBpRlJTCb zQtR;)8(l$bdjI4kzNL4Q_tYy$cVXC*GYR)U3OW%*kefy%Y}YoTa|lFcct5D;qqvVsj2%T zdL7$Qn*O@A_eD*@@lhLHHlRZA+3lG>x4@zSPRth79M#OmW+3XzSebr0W={(|HOJPH z_LvpG%o3o=rwb?nzLwjH)7&xl2lBek(6X+du|VwD!j_$K9#4Zm&SRfITGYLOnksp^M(rG_9GnW(p@X@F7KrvsbgZSc zWa*TD-K4NW_8dPijn>@Y`>CWf$OxB*=L(;PuR~33Ly0KuBv6IMXgpmS9rg{$yoG^+E3SR=saz&fZ_6N*}l;P&K(+9PWwlirB8T2B-G3 z%esu$@Fe)I_(Fwx#^W!F$iexU%NTSWHpwfvPT}1^c$C`?R?jmF=1vDqW7AuX*9~DX z{)`3FcSl=Ce)frQ6G7yWk=M@oy@o1dyvYOtk7rU9Z-AqUOGE~uo1}^w66FPS0HLGv z<|N*|39@$=cp;Q)o|xc-S!=USolU1ane&YA4I#Y zzy6d!O4y`IR_&rR)YqMC`;(5_Ub=<2!@usB`lQ8vd9^l~Am5q+zq3LY#kMDdSq#lBDB&cL zO0%Gm;b8&?JMebC$TQ-tJ zg5unpVdc>7F3&us@YV2i(Ps6G6*Bu*ns0lgfm?B=oTbY(!CC$=2FDF$I;@m=Sucya z`M`e7N%unJFo;ngp)Q^W&XxIb=l?4QbDtX0hN;QQy`}&kD^EQ*(&yUw!y+EuA7+O) z3&7^mup<2>fHZFK_wNC~+rI}pfZ*zW@8vNkgiz*_IXE?^TT4Y|z&ez-?(QWhx*K<| z_NKj5JK6w<8lL_V);?%{kJ-3{Zhx2k-<}Kz3=y@lA1$_%e2N(oGLe$9;S%hWL|euG z2~cdL4|5QlV=2Qo#15$%N;+Uhx~HPtpxG14KT$)n=S``aK+KfV)EdSTQIfN~Bc6(` zJ>pH0e2r!buaOetugYCGzJzhT3As>pPlPYt5%BFa%v^aUnJN;K4-&B_SCt4Oy%uPX zf9vR1q7AdG+52oIheHmGv_4a)^jmsOw7+gHR3-U_ef!%xug&t+;c-;GsYAUJb}BC! za-^;YuN`Bw3!xCp3dh_6h4!Z5G%+ImuaT=I+h>^Jq5VpNQsN#KIZ(}W)&j0lv%t3g z3HlH&cR+N5=0%BdRe9S1(vx=HSta)8paYF*;sTpr^FaXshTLdr1uV8Q( z^XgPAZ+#EWKd#s$wypQE#S&qjE}vj+n)lhxX%qrq~Dod6x=$@~n#mB1Q#q z^l^~ry(*bS{uZSt|3vLBm1Ba*vtoIwxeVn(srO6eH>M9WV%72 zh0)oSK`RuG7Kfs{4JR^rwk0~aypS0Ul}S~v3=MG)3(Na@zkiRbKLWC6{)0HtYD}Hy z>t*$EN4~lx@$O-RmMyH^C-dg-;13{AD+!*^Y<5B7Nih_0J(q{~Z?CTtvG;SWu*dS2 zA9UbfV7Yi;C}=rS=Y`yT1B$vI!CJoJvSug!W~D#Chf?hACr@vB2G;myDVSBtZMqrt zAAE5bjTMejI)o*gu%UyIyg5u&;22aXEMu!{fB4&5?_EHYK|)#%rv` z$kfa)g|&9LJT-+hlJhhyoMC1F^UevaOB+UyL|)k?{mH{WQ$(4V-;5+FlBo{v2t`aV zz|P}idvZ`A=J&SE-ZaCB#*r7YCS#&`W!02DsqPxh4OUgX*=Z$#b8W*F>o53}#47K~ z@AZ&%?d1&^`5b@+@VAr98ea!UH zwg$mq?xmxzo$HJFa)ioK#`tCVPPsHsrz9T@jS^8w zy0_MYbT~1Lm7(S~R$z2*XIj+GFAD*}Mj<~eBurFO9WuzDMTIyD@s&Q!?L%QDG4*H` z)iUI?8Q6p`_&!(;LFt<=gA#4&xE-tg3p?H>^c*qsC|Q7bGELxR<*csu)uqDl`?{V^ z%5T)rIjv;v*UnS=O)-wF7_|Z_Uumkqv?xOBGi|a=P**P6=X7>0OQ??x4`aH)uzhYw zkAuqcHP&frUEHx@{q>&mf9U8Gc}oPw+`l76ZJ=?Yjn?j@x3p%pr8)04*yPTUBjur7 znLhE_4_)L?jluqUy)TN-*U1$MyKd=Zq^G!tfp|t}4+q%vkW+2@SpWgKn()L6Og9hr zLz&9*eO1@)B%zHE~QURM{qx(yz-#OTG4n^xPyG^;I=5Ko^@;vq}bW{lxKQ%c;}+SGnhmlo72$b-~ba>k4h;jtAT_040_Zx8y3Q}Bn;I1+zgXP zi|=_OPDY2fUnhkNN-k+8$0`e)9oNXbOmWZ@yunM>azyjr&tWB#!ao?Ie$husMXltx zBh}DShbGi(4-4GSy`d(c)i3zI(d|x_BpM_T*jeodpu?>*(4N0U^aYf;Z z%%L>^bSZWhh8(-?YNjD01GwmGJ7qzo4`juG1N5Szjv~!u(ePX&gzm!6OXuWeL9TXH z(Ey-{2P;LqOTzk&zSR`a4^;t}d*zV&ZbxKxk!GVgvt1a{`0$oCJgZq1turZi&J*AW z*X~2OA+TeY(Hti;QiZN|I}Adfw$WN4b2b$-86-KchY2|F=S^KGP2h_Y9>SI&L%i5u zQsODYI$tM>$6}=O&{T}2WhF$rmg@8t!0f=w8kza-lPG+O{f`Y5NTF)33i&_0$eeru;rg z!7O`m!n2p}@EqLXf?_b#J!)H;CmcKEl;l+DplQQ`TTMagp0KS6yD9i%n%n=RXLW1c z%A%k*er!+m;m1kI!&<-sKFkZy`Yk@RE@u0iWtck6$n>~g`VgICaw(5B;;)5E$B{G1 zv#m7X4v~VY+cg=RxTzP?*A|8v$7;uu7<|tKNIj!&EalwYWiImtfGYLRX3F-cb-cKs zWZ(|6_+xve@N9fGoZZwlb67S9Ad&w7gf{JA(wZZ9^p^n0hka(i~5-XX!1edMS zTQ#q_FH^vaGUg(RzOkrqEudqHugVt>Qq&CHBN>0(F##?q`H3c|Rw_3O%YhwW-yDmyoh4 zT`s$`xqX&9(Q=F3nc3_(g&lry$SJe4d0M_CKpBuxe6ZBEmc36PIotADj3-0q43yvR z^MnfKUJy(LB`d7!oP}vCvrahO;*nq>|kSfsFpk_$|Y}3w>CLD6lb5%?~gqKjrIs z!eJhuB5_u%o^HTDRi&FB338szst7L5EeJC{F?-F4ah(8CR~@|oJE73p(ouMHjC$xp zS@%#TziM;F_f7?7)iuz#VqqK6F z=*M(g@p(thYMX8i5FBlYI=!t4vrm-h8Sq%k52Gq>8&UF@68s}3uT2)kywTx)jCriA zbh5D3a6Ry(gh;VTwpiaT(-qK{y&f{Wn##JW2hApzht6^I6^r}w?it3UfOj1{xB%W2 zYW8%zH=>_I?vdKAG$=PO$!`bn?$tsrw4TGz(~BK3*|qv0gibzCX`g|_+_Nl*oVSMg 
zL*Tdo1$q@X0@654%V{asZb9A^TFRlmxCC$;Yi@aVb~dAwYm>I%^*Stz`94`FJr2`x zJH4CJO3#Jz+{e8frK7D6goiO?{p4Ms&P5y#nk@PuotCdq4iHIKhf`FF32rYxB)EJe zp0vMT1vWQwO_T|+hfH`0vWXEn-KZU8TZOx%(hN71a5yi`&^zhf5-W)4Rz`5cdEPlu z@?`_#VZTUACYaLvhN>UQm!vbiq3t&dW^MFb*6~!80F!LYGc53>?Qnizg0>V||Ky)$ z4?luD3Cqaq{Ej6MJQ+zin6Wcl^P37*ODYphT4EQ7CUO2voO3jCWLJcm^U47U9M2jnCwINd?lEkx_KyUEvgn1VF(jeeIs@5{$G;U9FQNI?ovof6CP>p)h^h$rRq z*%_AGT?-Co!@Au16kpc^{o&ZzV!+q|{qcW@*!sW8W&O`8dH@nj5u8*$i6HpaiDwCh zot>Q=&L*?@-$dVg;&rk7#&UJ?Q_-|nT9*bOhlXhhm%rr$8=BdMv?a9AV3R;{+yzx% z>74i>JuvgCOSe+VsEKcXmqu@kmwUGogLC=h-(V3Cp@|Db$fx)F>Up;2h!a!VR)SA} zb%-YgOGO4pXJ9n615AWTEbf2h^2tp zGa?6F^Ujm9<`~zSwG&;r4YFvdUo^^~eX|9sZP#K%#a$yoKHDqWv z?%QlB+bT52P7PQ_Yy!f8rPxiFpcT=mr%S8k2 z8tA2e(Jz@BQF)isS303>Qu_~S`BC*E3N;(4{+cdr$NKE0x0Kuq+)tV3eD@0YV$ft`ja%HF9=Q^B{)$!uI) z8DW4`?gJV^t2q7%zopykj4Ri9aX1RC-9~{ayYFiU7~k9Z?T+skA`M)TS=VzVyS$0@UUmdkFQ z9kL9Of0chw>#8XwP^aVvrJpzh1225bO{)EL0xe7LIkG@FTm_(I(2Lyh&+`?%W)dMW zkmU&w;W(-3HuA~39T^;eG5m|7@?Qa$)bQzo&F@K-HC28(x@t9HYxl}G790k`O`P-= zxZHx;nd$SqS(HxgIQ+J@Sp=H@MmXtqdd-fT|Ah#3^Zj-pIuuWUQ>$k5vpShpTU5JY z$e)$U**`(y_hy}L_qv=OX4o9w`j2w#9_dJJM2(@Rx1mZ1+<8S-iVq7^z^B-d#sQ*O z7IpD9nUCr_MjRU(CMA1NZ?tY5Q|l5QXeQm|jlcL7#o_r3*((Wjc*GZgDU+86bNpr_ zs@$s+XnE(!lMN$$p+nQeFUC%|6>Gi5>^sDEE+avuH*M5QHi5)i!qjsDB{p0N13Ole z(xp=0Sy1^LoBZH}(-yll_9cHp7B&!3N~Bts@UJxwx$im&1rsS<3(_(pupBuI$mSnAz#i4j zTP1Vl1{jD%dSI!N2X3(ctQ-OLf{JO>A5!;2KL7Kh)T&4KzWdA8fYz)Ah6gTiah4(t z@0IGdNptm*b_3EnD~=E_hfN1iK8wrcPW1Hv-$O)lxAZlo)#Ed->0Iblc#?Clv0CT8 zr%#_Y1Fr+f1VJ>i+x@}MpD*|h+!qCmbTPQI_=MTNt~v;;i6v0=k^X+rIk2bk(a#c0 zV48gfJZ%W_jzN|^KXpi%{h6kpi#C>Kak~d5z8*D~-}eG1Pq_T#^TFD%Z`&=HZxS|T z1D8fMU=8Mgo05y!QF4osRHRUPNxpU(@P0R92f9M*WZm_M+SJmDoVBHH<7bWporDz; zPl5rR7-@UT(VdwmAeVHayS?gtXK2x5-@iy8Nu5%^YI-`KjB&;`y$}$Cc9cAqJ(K^? zaaEDSZ@@Q^#L~JtG$H42)uUrsEanyi^$Wy=4OX-EN@{zkLpY)XRL%Kh&qRM}?6gc+ zW}H}%u*L|B#ixiRaZ>BGl1v(sMd0Qf2jC6Q8arqe3ZEq~+XUKZgl%b0mSUsG_l-Wz$<)1s;Od2^}sPJrB=bwO{?uXE@_>y5oAuVB0Xf?}#xX#aH7g=x@mL%6OtoH^UHm#fzEG{=bv? 
z&VZhR)`!>}ya6p)0AK^_j{TZzp)_^I7ynV=Im;YE?TYKwf){Wu z?U0cf?*#PBqvNSn(fL#}3po_h* z1G|N;meQ;`N!HHMP{p5K<3$?L<%U+Gd}q5jtIxJ(kL8asn}Cqnp>t-%n0r>_8}?nG zr=NNxi^5~Yk=zCid{5RVlsOZ)+@sMkw!c8GzF{(_N3$>2Pzn_&1M`s=D()Q)!n)pv znED_`DxI5?NEbWAO;B-1;{ZFo+38+$d0~fHg8iCDBrpXAN$yC)O!t|6r()hZE591X z`({u}eVhU`{mWsH1X5Q7G1U=JgE!x4nj_@SiL8K53i@a)9P7S=2C|7SRH>d6S(n(6 z9elB`s|oT5mhtx1_@PxG6t}HIxqoqqto&rF>AKMmB%@Ycf5W1Sogp$7rD2->5{<$c zLzlOE)0hdfx&y%Cn-4Nyz6GK{eQYI@uIK|2-%0Sw5&ZFz)~vaHETy`Kbp#gkgd5IVz;^oiNdu`!j_n8%dqDlkk> z%*XY}3mPjCi6_Wb_BI`s&&4x?aF?AOn>_fU3il&US$hIA)$Rh{`Ms7T{VSyZ!ew!& zYO&ND&WY&rl{{&bJ+B%&b9yn6t_X0If4%^rTmSMxRr(Mnmu4-P`$V(>3@l@v)X@$c z7eJ=)!MVE^O6k_-Nl?|7P1`_NHiw*65yMh}UUcQA4lb6P-H$FmzC)0!x{%8YuP3=5 zy_q!FRxe8Yd6%bi)lNM~zixb?$)%xJg`iJjVPHp|i1CR%Z14|NgWdb9?3D)YG~xx7 zcz@OYy>~2SlD&3cnBDzz(iyEg@m;z%L;^?ycxYT90}D-QbV{XGcI<2okmlrSk$$TM zVihUS5`od$DZe4e-hG(mN(k_XH+a0#sdx+Qb1= z*j2|QRK6#?CxxbVmP$6cL?r@OUt8xU53Qylc1l37&^>LfDL{u`jl`$pNU3HE7e?L) zxUCxNtFM!8n>(vKVvz4*R@nqY)l0|T~JR=FT$?ZJoDawC6@4z# z!BIno*%vHcq89|G!@uOBwd9%yJ4R2H`lT}ixU!weKab2)w-m@a`g*s9z&D^&4JVsb zBt@IBIeGYD4A|f|y8%+kU${w1ga@CD%MUg7bQI_g_ww9Klvp-xn^1WHRw{IcQ5$FT z-0m0D&dcsS0s5fT^S4qnT;A`YZI#xUXx`jdPD%B_>5jd3AvicrG@Q_Xh-HAZRqCGf z0u^a5mo{c(Tq!c`mqV#iIwvF^QTlg(`|m3ppBK|)UaOj`A#qY?+BtnJ2&q_SE_Oaz z98-q4mtw`f>>&AQO3aHE$5ua zzHh5s7nnfmN1F+Yn1jie(s!!15)E*@1!rw^HEJ^knLPf--bhg5l7%R7LFH)S8c3mL z_Hm;5(Z=EF(HE69iC;Q);*LT~fMN5B{P&@n*KAMf;hGJHei>EYZ>gCLGoVloMxK@H z9W8WaWj5gtY`Vx9b5W#sUz~MsPM{c3sxNDoi0r)LoufxkstIqdx@W;v2xh?rko-F2w z!v^$Ra~+M##00M!c96;}=-Gd+bf|&SXzg}-pk4o25o$*|47zI0|o?O75Crmp!9lx+mI5pl0ml* zzr?uy1T&x28gr;SO8!$y_F$Xm^HC^L>BuH0=}rv9IxSozos>g8<@=F-wAFxi7fmS#jl|16qM*rm{PX8$k#oHf*`UI}N_$In z+xGm)v;0}vd2K}b`1mwS$*ff1`vMOGj1mlc9smx!X*zI^ho?gG#U9(T_O zajU^!JQ0%>M2Q^>su;H$=aPa)iaw{qujVC;f{QsRf$fY-ty1s(?M5-Y7PDq zJ1N$oR1Ubk^C~jT5$}Q$x^$IW@y9D;B-QZBuV+tNgJdLh)!lt;*1{138(B*@-I&3CsRirLL<*Bwz7)q?=%$NW*V9+tS48P9cNU+=0b2{m zwdkzUg?!yj&bv6BV9;PKTiCYcZINv}x9&Q>I1=cbTADgwlg`tPea@#l)sYq(70yJ0 z{-!l!3Hb`u3!w4*)xto{$h;didWWlvue7Op+1cFyS%;de;hvfbs)XA91EjeZNQH|4 zk@7q8-w={ZLfqe><9ca}|JT}K*(1MHNc{7IKG4j-Z5y8d3^3>;J$DAu-KDnFWtpE z{XqTh6;Zg)20{b6g5`$nAY(Jrk{25`-e1&vq{7QCf@U_2^sP?q&Gz%XdowwD?)yQr zM3I59Z-xUCI6CDg7JV2Oq!X}o(?HEJ_>D3mq*upQ_exmiS@QaKEyI}}O3G>LP#5?A z#abEQ!hX%q5q`74-Aq33elok6ks$mg;pkUO&7?gjBh4dYE-^57B@N?aBcR3{oESS@ z9l&SMp%2nMyXi02-28JG`#u(F8vD0&R~$+`=c56Ppq5{(RX=ixUJcDbE5t!RGzia7nlTmK}?(qvHqpXjh_1)#E| zb=+*#x^_((L>9XlB0z)3*>COnwv_xN~0Z4NIv*0Uc|1G?=G>#Q!j6BAvPld_J_hv!@a zR>7~}JPAni$CXt3B+}~3rmZgp+=#4pVkn=i=1`c793d*S4PY=aC+mVFM1_}kgp~^d z-cw1{qOQMGuY;n=JVpr-_h{`VmC`Lh@ZdIpzer+q2XmV~N#ii&N*=4WPlIGg-L4oqfV6Wm=PP~D7gs`G_@igtGBlK=pRYW=~o?9ip+cL zK07N9| ztiL$^pxRmZl!d?_$fq?9xTJKi5&g#7$BQ=4c>yWD?k&*@+pq9?n&XRdIgy$vi^~2h z~ zqE_QCHd=3ATGA5W4Bj&4yMt>udy^l%= z@TLO=k@LF+lRpx?{ap$u)VW<3!}TfHP)5%|;3=Frtkx|Yn;PMx%lV!BDSq9^{a*4? 
zYP!ei9-X9I7F#~ch8?ftXd0_ru8q_8cq^8T8yMqSG2YnFPs$3P2QJ$ zq)fv`Qlx*FBnioCvhST08;ZYi6(*49Q-do@O^1Pd=MgB}3sj$5r@`-*2_D>Uv0ye> zh4j2F((%iC*Ks7Mr3DDce++!!mJC1s7Fi-aOZOlf_*s2y;M4(mc|Xt$RF|qcpDa0DKJ>Pqzkp>3jU^NK-uW;pm!>xsBPhuq z#zqNYlM{tJ*u1Lfd0qwCkEbO@OiW84`SpC6v*{*LwX;}B^YHqoUgUu0jrh@KXWt59a=Wyb6@@8`_$OkAZS3cAUpBLEv@&&_uKr6E(c&2 z=CQ=7x$m*$V7ICqaU=R-X{GoFpncmf)mFhPV%or3Fq?zrc5l_kp_n-cEwJW*2=Af}pu=2p`D z_pCEAE^})jR`0cVw#dOSD(|pQ&FN^wlql~+hUd>QgsGT_8?ngg6m|~^ShFTFJIE$cg&-x) zs#e@xpgN225)(6XgYO>Z0W|Fk#3*hU9q=9Xkc6lx+AwuvG04j75F7}Dzj5qBm539 zHdX{ke@5SiqAmSrj+i~ZtU%E2O~tAJ=P}y`c6@Jei$cZ3cZ6N_V@dm!BqoCumrbX9 z6j=k}Dz*)yQcaD@WvlI@;`!?A$n*h0&z=u~`$XJTSMFfu_ciYVJ>M-mCpS$qKgFH_ zG=15%Z&3kFNiv5v{mlIw_QwuYeCX+YmZgD~%#)JoCa$bECt(4vO)K=!v|+cf^USw> z=|+PxFGxx4#Q44xyL=Y|MZzwMNx?y1YTJ?E2bU)VzN*f$B;^)+1@DYjeKCq8O7s|U>wRK&D9A30w0R`#@f z#(-EI%bUt#_+aYizDt`(nD_avRk1ZB6DTVpe!YLrBO3-uH+|u@!Hthru#ER$j~mgK*0p4%|%AKP|w?%hV_# z`0LcVox>QRlwO!m={HB#R8@vft3T_FBZWrr<_AbkL!ZNpaoP#h!pbY1K`eWvsa<`z z|JB}^$2EDSdz|S^TS{#e1r$N3sR{^!xBvnY%VGseK`UWj1cM6+lmS^3h*eP}AeO}- zDAX$0%9=z77$8~+Xb`Aa1Pq%&HbbBxY#}7M&kI2qXKp{YcjkWXKmOq#F(f(fdCz&y zv;3a#TT{c6*UBoG9U|VRG-M@hki3t+^jJ%RMZ+$5&$6R?LkSF9bbj2lx%IAlV5%{W zf3kTXvoO<(IhA^5zx9=J%`YHP&F$!K+FkXuKDRAhq7Jw8^K7&C?5-Jje-n?seK%$m zS$Z%12>&E!HY>iB@mL{$+`aW;D06Cs&$AkCVliJ(I>fr`%xN-f)a)iiC+Ak!&q_V7 z9qHU{G)u=BQ-FD4jDatP#~r?~WopI6vnCMos^I zNYL}(teMuNe|F7V-Zpj@PdKntMjKVa`$~8S8SzGg^EvyDzSMQ zUPSY*6=hCWsj!rusSK**_dkbyCsn=$0lI^ChUVU??d1TD3zN zJ&V?LGGDwl6iNCM4kcg2A2WLf`tUNMdG;1gYm}%RYfReTbuRzzds_m8&GqeNHtO78 zTokZJjQLzprj*B23^;glM?OR>f&<~=?UI$LlIJ<)RrsSHg%m}eRSdQa7m_4**KV=g zS9yss*-~6OCHOX!6mKX__Yt?hDlIf?s!uTfj(mgHUA z6zP5yV`>qQO2`Yzp5CsI-&iU;TsDi7NQTmOv{20KWlIW4hJ3ntm&Ysi$VyQSG$6-% zT5w~hy1`TFGP927GH8hdi9EvyRh%}F@H7@d!vs%W*hTM@)W;Z+x8S>L`F#GY)zc$i zJ33}jsZ7QsjppC-#8li$J}zy`&Y>G<5C@u=+%`AS;Sd&dc5lj^2Y00 zjTH`i?CJ`$B2&Xi)1kJ(8DkfD2DG1lvtDYUn#<5v7H(fSX>5LFt$F~*h zR(E?z3+#d^_SAf(a*n!A&jg&lVkVk?VxaBS&RXt1-DSNw59fAG-_t;n>YcTUT>aE# z*!6yHDQ7S`DNONr^bmQrjs6Qpo%Exp57Zo{F6S6%a6vZi5M-mer~gFd{>XGYa^Gd^ z;frf@+;RGn0vL_leSGe8X~VK~r%44Xw`TN_OeGr19eTBWA>D6X+#F@Mil5KeQqvk% z-KMo$Gaq8Bw4~kO-Tr<&jnith9Sg3c`e5!Q-=- zVHfxJRoJM!K0gnM>C8Zq&UGRZt7POrsoerBMMo%DkvDf}s zY_DMt@Lm8zk?HzXdQ6&0wZV1cJk6(;5>q5>hJTQ7DT0X6+1n`1<~O@3e~{e*n+k>> z->D2$mz1zBPv~%ATr$6wYnzOC<#V3-9jHnhG2CF>U8$UlX)ro)+D>sZCKT%PBA}6!EBibPy)_7p0#M-F z&`5tx{w%G^95MxP@&c&{DjG;*58V&fiB!?e;wUDYPzN=tIe`k;p)ppZxu5I4x;{H- zLb!ObuGvuBYpHNZXdzZ)Jzu;wNIOCK@h3XXy#^E`YKl32WBXK4xZFL?0RKQcYS3oL zf$8Is!)gb#J^G63K+$lF=);i(L-zy}P<@?(EG*mJV2&Mvl;KN55*{KGf$1dy?BrX2 zNXOJ*)1k`C2t`Cd>mC5O_C7={kWgyTMFDWtzHu8L@FqVr+JI%KIM?KZ($L3Oeul59Oc zxIh81g)x#3jTqg#kbRP^Dfo1{e{Q{=zlS;I;bVxzqN6)HJw_otblu@vc6N12Oa+n{ zMdHsrUT2qML6I4V+}2LIBAY`dDSx-w(k(ivqCP*-hi}^0)I>jpz!_!t6rs!r9&sLu zG(n()KlXx6-lB0zRh)iUC!=tcs35^71Xu8P4`Z?5%QeK%O<6@ae@GZDeilg@S?4WD z8hxGqqw_vkpz>}}Z7e1^%9`7q%oc{$RzORSZ89R2lm*mDsI<80uPEgm+w#V*VzUxl zTb3Wnb@n4W5gG6+nL}r2>>zUIfkV{%#b|~axEBp z=)Jrp9_vXb;uYJTJDBQU_jlI*7?VDiy_sLTaGj8SL>Lu$@RMHWRoUJFST|6SwO#~J`>VPS4TIgE~t9!MZZg0YD4slDTbakqWGx~}lW5g0Tofw^nU zmUNV|IH8}IrWJZPT&b}D0OJ~i#l?q%BI}#&O>(OJ>CK?ty^wSI-^>BnNN5706C&yD$W;8nPV?Rvg*UK<9193a)OSN8f>GJD*(k)64@n;=-+7$LfmgP3nL7 z@B+_z#VM(HiVZ+W5wwZjt1@2w9j|EDu3g8bl8Qm&&Eqx49&2oDd=b+eQqdM33#}4a zL{Y9_{5C`g~LVKa?%tg7HtB7i<>1{jGHtJ;b=~nwe+Z$dO)e482k4^@2 zS2-fjn$M1X0$?$Fz0nl?#~n;BK)3)ipAx1MubkTu?wOZ0fq24+8f#@9U);)V zPAVkle*V#P1deymx=5*DBs1PQO3z;R{)`B+#mu{&=tptpX>sG`hT<5v#d8Q$^%kOG zs?pj)qOVRKvzk8mVZjNKcV3BdadQeMtvd+O3Ej=HRar&zXM3&Z>5G4GR=|q)z*j^GY}e~)I1W5%sUZI9);;Sf+EZE6x_jbF)XE>o&cQEHr-8Cb>z!Dq0r 
zH^f74PImWZ)|f0Z;^!w}p{D9O<0~Zz<)kH>t-%pHSFA*}R8K9-RX!|?0- z2kAr!qP~~e!*#3_d4JIIv3KFp1I)86jb7R?>`4SEQ9jfLLL0iM-3P=d2OUA_+ zoZB?^AOk>{AaL2g&Y5hm41@EOrPDX&4taej^MObNVHH^+mD;(he>aeKGlDS8=GTaC zXt!kejxa*fiDq|6%zUJdFFuk^<_M==)SY>2Ir)YvK13zFe8l~myDQILn=3LlcvwFpHf4c zoT~B>RgFONex={iuWG`r%}_Ah{A0_VhpQEJQLPuK!X|b@p^5(TW88jeGus&DmVo7j z)^P2boS`Hvcb^N?Z`q~!7C^(d!&%HVzh^N5UzPk8J4XUHg{9YA==l<3vfsK`bCv)U z`t0DH4NVa=>2Je<`jhtTI5=m$wH9xLe<{#s%JsG__Y7eW+Oa;;(1wJ7fk#bhGfm24 z@~wn|bg#^-#6dvT3#-~&+j*Lm0p);+J!$_Yam!)6N!D&CJFtti?Vkpw zvTqQeQ__3mlJSifc3=H6Pd;FyVx@{^4Vqph??uIT$CtOnQI+l;sF```s)Lbt$*&RA z2szXzV@FkDBg3;aHW=gVxdisG5)Ll-Ey_Uovw zrJyhMz?>_xapG#&=h%dELmvV9b7$2RN!UOe_!mujflV(T&i+L%vH?nccjoPVBD3)R z!3+rhvB@99!%uJ{Gh3yzf1UOpYw^=P>v}uv=Yc$32+(}LTKtImo!p*{d%=7If_P!J z|8ced;Z}I}CP450%p}zIK)YWBU=31{4*w~y)~y^M+SSHb*lB2ow0`T{}KZu zGj#qd7IooJq+Kv=5Mx8uWd+Mbet~f}ca~de_kK2ymOO1ZN3H{f&RH4guXdi~a&FtF zmJ9YViEtIo2r4SdiHHa?XH?^g5~SOw3M%eZ23<+cz|+f*>a*ba5BDK(Qa^_*+*8Z3 zN3@_}%r>;F{M2vi*K4YvMmzn}E{okc=&hl#Cp&guv8~I22Ccf=)SIWR?*8fKEZN{P zUY?^+`(F(n4X5v@a~P)>r8SHgl){$ET1PNv(=?k;pDpQx2ImIn;GM3p`ppxRQSL@0 z7ng=Un{%|0A2#@1MS6b{FFVzKp$*40waE#TsAiqVO8`&R&j#7O;Mvaoynqw#0w}DN z%@x*emev~xPOR74S>zfM9_1!~C?Y{Qkgg8bFWQ(EvI6mtmhqbFdAFR8>RwvS>hZ-* z*3b4Yz-$9VK3+`$8kJ)u;a3%0N1LChS=52jH)N&k4xLwIUCO-Kn+lLz{Pb&wg01$V zhJe@;ebLPJXm&_wbl`;QxBLEFOY;1!KOZuLEs(E=hkY#XAGIJ2_aAsNdAfa49@}Y0 zDfgs)a+Dh5NWa{_oz?RhK)__A4)SZg|DGmmD_cXVjkzSD0=Wwyy0)ePn;2o+KgMk# zdH?d@kMybY%TLu^n~k$+pACYPV~nM$dg32+G%ZYS;t?A+$#lZj57Y|^fvLH-y`|*^ zG&^_C#=-;EbQyOkRR=?DR(MRU0TE~X!hLOZdcK!(Q+Ol^@cG+}bywUa?>o z>}by=-G9*bKZDGEcmu5JlP+lI#`*q>rsu_7c3iv-n9@J5`u8^8e`NQ*`;vdQXz%9a z|8h>e8)*zNZ!%xD`yd?jM?{AAM1``m&3ML!_;-yjFJal5=)$RR-bI9&^H@ z_qx4(XlD=^Roi}au`Y2Ksk5WZRxg>|-pfx}g!Bj&GhgQ7ntv*O&v@MI5Ig zov4>N8SJ;63~*acgXVm}eSnv3@ZcMn2lDPs8H$^?qd}_qr2V1Sc{fZ5%!-_?&DYjN ImcD2H3-47SJ^%m! literal 0 HcmV?d00001 From d86a1eed8b77b63e09dfa486fb8a80708f68aa05 Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 24 Feb 2023 16:50:25 +0800 Subject: [PATCH 200/204] update doc by chatgpt --- docs/FAQ.md | 8 ++ docs/README.MD | 32 ----- docs/common_image_error.md | 11 ++ docs/design_doc/ymir_call_image.md | 115 ++++++++++++++++++ .../custom_hyper_parameter.md} | 4 +- docs/image_community/seg-mmseg-tmi.md | 1 - .../simple_semantic_seg_infer.md | 15 +++ .../simple_semantic_seg_mining.md | 15 +++ .../simple_semantic_seg_training.md | 35 +++++- docs/imgs/2007_000783.png | Bin 0 -> 3959 bytes docs/imgs/ymir-design.png | Bin 0 -> 15545 bytes docs/index.md | 2 +- docs/object_detection/simple_det_infer.md | 17 ++- docs/object_detection/simple_det_mining.md | 15 +++ docs/object_detection/simple_det_training.md | 21 +++- docs/overview/dataset-format.md | 13 +- docs/overview/framework.md | 14 ++- docs/overview/introduction.md | 79 ++++++++++++ docs/overview/ymir-executor.md | 2 +- mkdocs.yml | 3 + 20 files changed, 353 insertions(+), 49 deletions(-) create mode 100644 docs/common_image_error.md create mode 100644 docs/design_doc/ymir_call_image.md rename docs/{hyper-parameter.md => fast_custom/custom_hyper_parameter.md} (96%) create mode 100644 docs/imgs/2007_000783.png create mode 100644 docs/imgs/ymir-design.png create mode 100644 docs/overview/introduction.md diff --git a/docs/FAQ.md b/docs/FAQ.md index 7267a48..0d06a4f 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -26,6 +26,14 @@ docker build -t ymir-executor/yolov5:cuda111 . 
 -f ymir/docker/cuda111.dockerfile
 ```
 
+## Errors in self-built images
+
+- [Debugging detection images](./object_detection/test_det.md)
+
+- [Debugging segmentation images](./image_segmentation/test_semantic_seg.md)
+
+- [Common image errors](./common_image_error.md)
+
 ## How to trade off and improve model accuracy and speed
 
 - Model accuracy is related to dataset size, dataset quality, learning rate, batch size, number of iterations, model architecture, data augmentation, loss function, and so on; this is not expanded on here. For details, see:
diff --git a/docs/README.MD b/docs/README.MD
index a99f718..5975153 100644
--- a/docs/README.MD
+++ b/docs/README.MD
@@ -1,37 +1,5 @@
 # ymir image documentation
 
-## Introduction
-
-- [Introduction to ymir](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E)
-
-- ymir images: docker images that provide model training, inference, and mining services for ymir
-
-## Quick start
-
-- [Install ymir](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#2-%E5%AE%89%E8%A3%85)
-
-- [ymir user guide](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E)
-
-- [Image community](http://pubimg.vesionbook.com:8110/img): download and publish ymir images
-
-- 💫[Historical images](./official-docker-image.md)
-
-- 💫[Mining algorithm benchmarks](./mining-images-overview.md)
-
-## About ymir images
-
-- [Overall workflow of ymir images](./overview/framework.md)
-
-- [Dataset formats of ymir images](./overview/dataset-format.md)
-
-- [Hyper-parameters of ymir images](./overview/hyper-parameter.md)
-
-- [Introduction to building ymir images](./overview/ymir-executor.md)
-
-## Customizing an existing image
-
-- [Add/remove/modify default hyper-parameters](./hyper-parameter.md)
-
 ## Customizing from an open-source repository
 
 - [yolov5 example](https://github.com/modelai/ymir-yolov5/pull/2/files)
diff --git a/docs/common_image_error.md b/docs/common_image_error.md
new file mode 100644
index 0000000..628f111
--- /dev/null
+++ b/docs/common_image_error.md
@@ -0,0 +1,11 @@
+# Common errors in self-built images
+
+## Training images
+
+- Writing the training-result accuracy as a tensor or numpy value instead of a plain Python float
+```
+result = torch.tensor(0.5)
+evaluation_result = dict(mAP=result)  # should be: evaluation_result = dict(mAP=result.item())
+
+yaml.representer.RepresenterError: ('cannot represent an object', 0.39)
+```
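The `RepresenterError` above occurs because PyYAML cannot serialize `torch.Tensor` or numpy scalar objects. A minimal defensive sketch (the `to_plain_float` helper is hypothetical, not part of any ymir SDK) that normalizes metrics to plain Python floats before dumping the result file:

```python
import numpy as np
import torch
import yaml


def to_plain_float(value) -> float:
    """Normalize torch/numpy scalars to a plain Python float so PyYAML can dump them."""
    if isinstance(value, torch.Tensor):
        return float(value.item())  # 0-dim tensor -> python scalar
    if isinstance(value, np.generic):
        return float(value)         # np.float32 / np.float64 / ...
    return float(value)


evaluation_result = dict(mAP=to_plain_float(torch.tensor(0.39)))
with open('/out/models/result.yaml', 'w') as f:
    yaml.safe_dump(evaluation_result, f)  # dumps a plain float, no RepresenterError
```

Calling `float(...)` once, at the point where metrics are collected, keeps the YAML writer independent of whichever framework produced the numbers.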
diff --git a/docs/design_doc/ymir_call_image.md b/docs/design_doc/ymir_call_image.md
new file mode 100644
index 0000000..5d95d3b
--- /dev/null
+++ b/docs/design_doc/ymir_call_image.md
@@ -0,0 +1,115 @@
+# Design document
+
+This document describes how the ymir platform invokes an image.
+
+- Images are the core component of the ymir platform: the platform implements model training, inference, and mining by invoking images.
+
+- By choosing different images, users can meet the speed, accuracy, and deployment requirements of different scenarios.
+
+![ ](../imgs/ymir-design.png)
+
+## Adding an image
+
+- That is, `Image Management / My Images / Add Image`.
+
+- When an image is added, the ymir platform resolves the image by its address and parses the four yaml files under /img-man inside the image:
+
+```
+training-template.yaml
+mining-template.yaml
+infer-template.yaml
+manifest.yaml
+```
+
+- If the image does not exist locally, it is downloaded with `docker pull`. Since docker hub is hosted overseas, this can fail for network reasons; a [registry mirror/proxy](https://dockerproxy.com/) may help.
+
+- training-template.yaml contains the default hyper-parameters for training tasks.
+
+- mining-template.yaml contains the default hyper-parameters for mining tasks.
+
+- infer-template.yaml contains the default hyper-parameters for inference tasks.
+
+- manifest.yaml declares the image's target type: object detection, semantic segmentation, or instance segmentation.
+
+## Model training
+
+- On the ymir platform, select the training image, training set, training classes, validation set, pretrained model, number of GPUs, and hyper-parameters. The platform then creates `in` and `out` directories and mounts them into the image as /in and /out.
+
+### Contents of the in directory
+
+- Training/validation images: the image root directories are symlinked into in/assets, and the image paths are written to in/train-index.tsv and in/val-index.tsv.
+
+- Training/validation annotations: the annotation format is determined by the `export_format` hyper-parameter; see [dataset formats](../overview/dataset-format.md).
+
+- Training classes: the platform appends the training classes to the `class_names` hyper-parameter field.
+
+- Number of GPUs: the platform automatically selects idle GPUs (e.g. GPU memory usage < 30%), remaps the gpu ids, and appends them to the `gpu_ids` hyper-parameter field.
+
+- Pretrained model: the platform unpacks the pretrained model into in/models and appends its file paths to the `pretrained_model_params` hyper-parameter field.
+
+- Hyper-parameters: the platform shows the default hyper-parameters from training-template.yaml on the web page; after the user edits them, they are saved together with the fields above to in/config.yaml.
+
+### Contents of the out directory
+
+- A running image must produce /out/monitor.txt, /out/tensorboard/xxx, and /out/models/result.yaml.
+
+- The models directory holds the model weights and the training-result file /out/models/result.yaml; based on result.yaml, the platform packages the weights and displays the model accuracy.
+
+- The platform links the tensorboard directory to out/tensorboard; the image must save its training logs there.
+
+- The training progress (between 0 and 1) must be written to /out/monitor.txt in real time; the platform uses it to display progress on the page and estimate the remaining time.
+
+### Example contents of the in and out directories
+
+```
+.
+├── in
+│   ├── annotations [257 entries exceeds filelimit, not opening dir]
+│   ├── assets -> /home/ymir/ymir/ymir-workplace/sandbox/0001/training_asset_cache
+│   ├── config.yaml
+│   ├── env.yaml
+│   ├── models
+│   ├── train-index.tsv
+│   └── val-index.tsv
+├── out
+│   ├── models [29 entries exceeds filelimit, not opening dir]
+│   ├── monitor.txt
+│   ├── tensorboard -> /home/ymir/ymir/ymir-workplace/ymir-tensorboard-logs/0001/t00000010000028774b61663839849
+│   └── ymir-executor-out.log
+└── task_config.yaml
+```
+
+### result.yaml example
+```
+best_stage_name: epoch2
+mAP: 0.5509647407646582
+model_stages:
+  epoch1:
+    files:
+    - epoch1.pt
+    mAP: 0.2869113044394813
+    stage_name: epoch1
+    timestamp: 1663839980
+  epoch2:
+    files:
+    - epoch2.pt
+    mAP: 0.5509647407646582
+    stage_name: epoch2
+    timestamp: 1663840020
+```
+
+## Model inference and mining
+
+- Largely similar to model training; the differences are listed below.
+
+### Contents of the in directory
+
+- Inference/mining images: the image root directories are symlinked into in/assets, and the corresponding image paths are written to in/candidate-index.tsv.
+
+- Pretrained model: the platform unpacks the pretrained model into in/models and appends its file paths to the `model_params_path` hyper-parameter field.
+
+### Contents of the out directory
+
+- Model inference must produce /out/monitor.txt and /out/infer-result.json.
+
+- Model mining must produce /out/monitor.txt and /out/result.tsv.
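Putting the training contract above together, a minimal sketch of a training entrypoint might look as follows. It is illustrative only: the training loop is stubbed out, `epochs` is a hypothetical hyper-parameter, and monitor.txt is written as the bare progress value described above (production images typically wrap this bookkeeping with the ymir-exc SDK rather than writing the files by hand):

```python
import time

import yaml


def write_monitor(percent: float) -> None:
    # progress in [0, 1]; the platform reads this file to draw the progress bar
    with open('/out/monitor.txt', 'w') as f:
        f.write(f'{percent:.4f}\n')


def main() -> None:
    # hyper-parameters assembled by the platform, see "Contents of the in directory"
    with open('/in/config.yaml') as f:
        config = yaml.safe_load(f)
    class_names = config['class_names']          # training classes appended by the platform
    gpu_ids = config['gpu_ids']                  # idle GPUs picked by the platform
    pretrained = config.get('pretrained_model_params', [])
    print(f'train {class_names} on gpu {gpu_ids}, init from {pretrained}')

    epochs = int(config.get('epochs', 2))        # hypothetical hyper-parameter
    stages = {}
    for epoch in range(1, epochs + 1):
        # ... a real epoch of training would run here and save epoch{epoch}.pt ...
        map_value = 0.3 * epoch                  # placeholder metric
        stages[f'epoch{epoch}'] = {
            'files': [f'epoch{epoch}.pt'],
            'mAP': float(map_value),             # plain float, see the note above
            'stage_name': f'epoch{epoch}',
            'timestamp': int(time.time()),
        }
        write_monitor(epoch / epochs)

    best = max(stages, key=lambda name: stages[name]['mAP'])
    result = dict(best_stage_name=best, mAP=stages[best]['mAP'], model_stages=stages)
    with open('/out/models/result.yaml', 'w') as f:
        yaml.safe_dump(result, f)                # schema as in the result.yaml example


if __name__ == '__main__':
    main()
```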
diff --git a/docs/hyper-parameter.md b/docs/fast_custom/custom_hyper_parameter.md
similarity index 96%
rename from docs/hyper-parameter.md
rename to docs/fast_custom/custom_hyper_parameter.md
index 2b4b443..eaab406 100644
--- a/docs/hyper-parameter.md
+++ b/docs/fast_custom/custom_hyper_parameter.md
@@ -1,4 +1,4 @@
-# Image hyper-parameters
+# Modifying image hyper-parameters
 
 ## How the ymir backend reads an image's hyper-parameters
 
@@ -81,7 +81,7 @@ docker build -t youdaoyzbx/ymir-executor:ymir2.0.1-yolov5-cu111-tmi . -f zzz.doc
 
 - training-template.yaml
 
-- start.py: modify the contents of this file to handle added or removed hyper-parameters
+- start.py: modify this file to handle the added or removed hyper-parameters
 
 - zzz.dockerfile
 
diff --git a/docs/image_community/seg-mmseg-tmi.md b/docs/image_community/seg-mmseg-tmi.md
index 27de31c..93b1fd7 100644
--- a/docs/image_community/seg-mmseg-tmi.md
+++ b/docs/image_community/seg-mmseg-tmi.md
@@ -61,7 +61,6 @@ docker pull youdaoyzbx/ymir-executor:ymir2.1.0-mmseg-cu111-tmi
 | Hyper-parameter | Default value | Type | Note | Advice |
 | - | - | - | - | - |
 | hyper-parameter | default value | type | note | advice |
-| config_file |
 | export_format | seg-coco:raw | string | handled by the ymir backend; export format of the ymir segmentation dataset | do not change |
 | shm_size | 128G | string | handled by the ymir backend; shared memory available to the docker image | suggested size: number of GPUs used by the image * 32G |
 | config_file | configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py | file path | mmlab config file | the fastscnn series is recommended; see [configs](https://github.com/modelai/ymir-mmsegmentation/tree/master/configs) |
 
diff --git a/docs/image_segmentation/simple_semantic_seg_infer.md b/docs/image_segmentation/simple_semantic_seg_infer.md
index 42a9352..588e024 100644
--- a/docs/image_segmentation/simple_semantic_seg_infer.md
+++ b/docs/image_segmentation/simple_semantic_seg_infer.md
@@ -2,6 +2,21 @@
 
 See the [introduction to building ymir images](../overview/ymir-executor.md)
 
+## Example image input/output
+```
+.
+├── in
+│   ├── annotations
+│   ├── assets
+│   ├── candidate-index.tsv
+│   ├── config.yaml
+│   ├── env.yaml
+│   └── models
+└── out
+    ├── monitor.txt
+    └── infer-result.json
+```
+
 ## Working directory
 
 ```
diff --git a/docs/image_segmentation/simple_semantic_seg_mining.md b/docs/image_segmentation/simple_semantic_seg_mining.md
index cc4e9e4..a4c50b1 100644
--- a/docs/image_segmentation/simple_semantic_seg_mining.md
+++ b/docs/image_segmentation/simple_semantic_seg_mining.md
@@ -2,6 +2,21 @@
 
 See the [introduction to building ymir images](../overview/ymir-executor.md)
 
+## Example image input/output
+```
+.
+├── in
+│   ├── annotations
+│   ├── assets
+│   ├── candidate-index.tsv
+│   ├── config.yaml
+│   ├── env.yaml
+│   └── models
+└── out
+    ├── monitor.txt
+    └── result.tsv
+```
+
 ## Working directory
 
 ```
diff --git a/docs/image_segmentation/simple_semantic_seg_training.md b/docs/image_segmentation/simple_semantic_seg_training.md
index 3dc917e..3cd4933 100644
--- a/docs/image_segmentation/simple_semantic_seg_training.md
+++ b/docs/image_segmentation/simple_semantic_seg_training.md
@@ -1,6 +1,39 @@
 # Building a simple semantic segmentation training image
 
-See the [introduction to building ymir images](../overview/ymir-executor.md)
+See the [introduction to building ymir images](../overview/ymir-executor.md): the image loads the dataset, hyper-parameters, task information, and pretrained weights from the /in directory, and produces model weights, a progress file, and training logs under /out.
+
+## Example image input/output
+```
+.
+├── in
+│   ├── annotations
+│   │   └── coco-annotations.json
+│   ├── assets -> /home/ymir/ymir/ymir-workplace/sandbox/0001/asset_cache
+│   ├── config.yaml
+│   ├── env.yaml
+│   ├── models
+│   │   ├── best_mIoU_iter_180.pth
+│   │   └── fast_scnn_lr0.12_8x4_160k_cityscapes.py
+│   ├── train-index.tsv
+│   └── val-index.tsv
+├── out
+│   ├── models
+│   │   ├── 20221103_082913.log
+│   │   ├── 20221103_082913.log.json
+│   │   ├── fast_scnn_lr0.12_8x4_160k_cityscapes.py
+│   │   ├── iter_10000.pth
+│   │   ├── iter_12000.pth
+│   │   ├── iter_14000.pth
+│   │   ├── iter_16000.pth
+│   │   ├── iter_18000.pth
+│   │   ├── iter_20000.pth
+│   │   ├── latest.pth -> iter_20000.pth
+│   │   └── result.yaml
+│   ├── monitor.txt
+│   ├── tensorboard -> /home/ymir/ymir/ymir-workplace/ymir-tensorboard-logs/0001/t00000010000043b47591667304420
+│   └── ymir-executor-out.log
+└── task_config.yaml
+```
 
 ## Working directory
 ```
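The inference and mining documents above share one pattern: read image paths from /in/candidate-index.tsv, score each image with the model from /in/models, and write /out/result.tsv (mining) or /out/infer-result.json (inference) plus /out/monitor.txt. A rough mining sketch follows, with two explicit assumptions: the scoring function is a stand-in for a real criterion such as the CALD score, and the `image_path<TAB>score` line format of result.tsv should be verified against your ymir version:

```python
import random


def score_image(image_path: str) -> float:
    """Stand-in for a real mining score (e.g. entropy or CALD consistency)."""
    return random.random()


with open('/in/candidate-index.tsv') as f:
    image_paths = [line.strip() for line in f if line.strip()]

# higher score first, so the most informative images get labeled first
scored = sorted(((path, score_image(path)) for path in image_paths),
                key=lambda pair: pair[1], reverse=True)

with open('/out/result.tsv', 'w') as f:
    for path, score in scored:
        f.write(f'{path}\t{score:.6f}\n')  # assumed format: image_path<TAB>score

with open('/out/monitor.txt', 'w') as f:
    f.write('1.0\n')  # mark the task finished
```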
diff --git a/docs/imgs/ymir-design.png b/docs/imgs/ymir-design.png
new file mode 100644
index 0000000000000000000000000000000000000000..54e1238f0e1a9c3809cee374f9b26f5e9bf597bd
GIT binary patch
literal 15545
[binary PNG data omitted]
zUS;qBjBHkS#@|J&zn}k#@Bh}!`b%5@ogcFZW&5|<6%CW`j#&-(^hV}C!38L2l|bc< z5dK^L3B*&&JF@p4Q+C9E+1&&p((5cxCgy*l&<}rLOTd /home/ymir/ymir/ymir-workplace/sandbox/0001/training_asset_cache +│ ├── config.yaml +│ ├── env.yaml +│ ├── models +│ ├── train-index.tsv +│ └── val-index.tsv +├── out +│ ├── models [29 entries exceeds filelimit, not opening dir] +│ ├── monitor.txt +│ ├── tensorboard -> /home/ymir/ymir/ymir-workplace/ymir-tensorboard-logs/0001/t00000010000028774b61663839849 +│ └── ymir-executor-out.log +└── task_config.yaml +``` ## 工作目录 ``` diff --git a/docs/overview/dataset-format.md b/docs/overview/dataset-format.md index 7c0ee28..d52adc8 100644 --- a/docs/overview/dataset-format.md +++ b/docs/overview/dataset-format.md @@ -114,14 +114,19 @@ ymir平台导出的数据集格式,其中图片格式固定为 'raw', 而标 ## seg-coco:raw -语义与实例分割的标注格式 +语义与实例分割的标注格式, 参考coco数据集给出的格式 - `export_format = seg-coco:raw` 时的训练/验证集索引文件 !!! 注意 - 此时所有图像文件共享一个标注文件 - 此时训练集与验证集共享一个标注文件 - 语义与实例分割标注中不包含背景类,即只提供项目标签的标注mask + 训练集与验证集共享一个标注文件,需要根据索引文件进行数据集划分 + +!!! 注意 + 语义与实例分割标注中不包含背景类,即只提供项目标签的标注mask。 + 如下图所示,annotations中可能只编码人和马的区域。 + 用户可以通过超参数控制训练镜像是否忽略背景区域。 + +![](../imgs/2007_000783.png) ``` /in/assets/02/1c5c432085dc136f6920f901792d357d4266df02.jpg /in/annotations/coco-annotations.json diff --git a/docs/overview/framework.md b/docs/overview/framework.md index 3bfcc9e..b6915b9 100644 --- a/docs/overview/framework.md +++ b/docs/overview/framework.md @@ -1,4 +1,4 @@ -# ymir镜像整体流程 +# ymir镜像简介 - 从数据的角度看,ymir平台实现了数据的导入、划分、合并与标注等功能;镜像则提供代码与环境依赖,利用数据训练模型,对数据进行推理或挖掘出最有标注价值的数据。 @@ -6,6 +6,9 @@ - 从接口的角度看,约定好ymir平台提供的数据与超参数格式,镜像产生的结果文件格式。则可以提供多种镜像,实现不同的算法功能并对接到ymir平台。 +!!! 注意 + 与其它docker镜像不同,ymir镜像中包含镜像配置文件、代码与运行环境。 + ## ymir镜像使用 - [模型训练](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E#%E6%A8%A1%E5%9E%8B%E8%AE%AD%E7%BB%83) @@ -27,8 +30,6 @@ - 镜像名称:用户自定义的镜像名称,注意名称长度,最多50个字符 -- 关联镜像:对于单一功能的镜像,训练镜像产生的模型,其它镜像不一定能使用。如采用基于[yolov4](https://github.com/AlexeyAB/darknet)训练的模型权重,基于[yolov7](https://github.com/WongKinYiu/yolov7) 推理镜像不支持加载相应模型权重。 因此需要对此类镜像进行关联,推荐使用多功能镜像。 - - 镜像功能参数:为提高镜像的灵活性,用户可以在ymir平台上修改镜像的默认功能参数。如 `epochs`, `batch_size_per_gpu`,控制训练镜像的训练时长及显存占用。注意ymir平台为所有镜像提供额外的[通用参数](./hyper-parameter.md) - 训练镜像功能参数:对应训练超参数,常见的有`epochs`, `batch_size_per_gpu`, `num_workers_per_gpu`。默认训练参数配置文件存放在镜像的`/img-man/training-template.yaml` @@ -41,7 +42,10 @@ - 镜像目标定义在镜像的 `/img-man/manifest.yaml` 文件中,如此文件不存在,ymir则默认镜像为目标检测镜像。 -- 添加镜像:添加镜像时需要管理员权限,ymir平台首先会通过 `docker pull` 下载镜像,再解析镜像的`/img-man`目录,确定镜像中算法的类型及镜像支持的功能。 +- 关联镜像:对于单一功能的镜像,训练镜像产生的模型,其它镜像不一定能使用。如采用基于[yolov4](https://github.com/AlexeyAB/darknet)训练的模型权重,基于[yolov7](https://github.com/WongKinYiu/yolov7) 推理镜像不支持加载相应模型权重。 因此需要对此类镜像进行关联,推荐使用多功能镜像。 + +!!! 添加镜像 + 添加镜像时需要管理员权限,ymir平台首先会通过 `docker pull` 下载镜像,再解析镜像的`/img-man`目录,确定镜像中算法的类型及镜像支持的功能。 ## ymir平台与镜像之间的接口 @@ -60,4 +64,4 @@ - [超参数信息](./hyper-parameter.md) -- [ymir平台接口文档](https://github.com/IndustryEssentials/ymir/blob/master/dev_docs/ymir-cmd-container.md) +- [接口文档](../design_doc/ymir_call_image.md) diff --git a/docs/overview/introduction.md b/docs/overview/introduction.md new file mode 100644 index 0000000..83acf37 --- /dev/null +++ b/docs/overview/introduction.md @@ -0,0 +1,79 @@ +# YMIR简介 + +YMIR是一款专为规模化生产而设计的AI平台,旨在为算法开发和标注人员提供端到端的算法研发工具。YMIR平台可以将企业的数据和模型进行平台化管理,降低算法开发和维护成本,提高数据标注和模型训练效率。除了数据标注、模型训练和模型部署功能外,YMIR还提供以下特色功能: + +1. 数据挖掘功能:YMIR利用主动学习算法,可以挖掘高质量数据,并仅使用10%的标注量即可获得接近100%标注的精度。 + +2. 数据和模型版本管理:YMIR系统可以对数据和模型进行版本管理,支持历史追溯和复现。 + +3. 项目划分:每个项目都具有固定的标签集,用户可以在同一项目中进行数据操作和模型训练,产生多个数据和模型版本,并对不同版本的数据和模型进行对比分析,提高工作效率。 + +4. 
+4. 可视化:YMIR支持对数据、模型训练、模型推理和模型评估进行可视化,方便用户理解和把控AI算法生产的所有环节。
+
+![ ](../imgs/ymir-design.png)
+
+# 安装简介
+
+详情参考[官方安装说明](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#2-%E5%AE%89%E8%A3%85)
+
+## 服务器系统
+
+- 推荐使用 ubuntu 18.04, 使用 ubuntu 22.04+ 可能会出现glibc缺失的问题。
+
+## nvidia驱动
+
+- 推荐使用 Nvidia driver >= 510.47.03, 以支持cuda11.6及以下镜像
+
+```
+# 测试命令
+nvidia-smi
+```
+
+## docker & docker compose
+
+- 推荐使用 docker >= 20.10, 安装参考[docker install](https://docs.docker.com/engine/install/ubuntu/)
+
+- 推荐使用 docker compose >= 1.29.2
+
+```
+# 安装docker engine, 此方式经过第三方源,可能有风险
+curl -sSL https://get.daocloud.io/docker | sh
+
+# 安装docker compose
+pip3 install docker-compose
+
+# 普通用户添加docker权限,重启生效
+sudo groupadd docker
+sudo usermod -aG docker $USER
+
+# 测试普通用户使用docker
+docker run hello-world
+
+# 查看docker-compose版本
+docker-compose version
+```
+
+## nvidia-docker
+
+- [安装参考](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installation-guide)
+
+```
+# 测试命令
+docker run --rm --runtime=nvidia --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi
+```
+
+## ymir安装命令
+
+- 安装并开启服务
+```
+git clone https://github.com/IndustryEssentials/ymir.git
+cd ymir
+bash ymir.sh start
+```
+
+- 安装完成后,直接访问 http://localhost:12001 即可显示登录界面, 默认用户名为 admin@example.com, 密码为 12345678
+
+- 停止服务
+```
+bash ymir.sh stop
+```
+
diff --git a/docs/overview/ymir-executor.md b/docs/overview/ymir-executor.md
index 5b654e9..a390210 100644
--- a/docs/overview/ymir-executor.md
+++ b/docs/overview/ymir-executor.md
@@ -17,7 +17,7 @@
 
 ## 环境依赖
 
-假设拥有一台带nvidia显卡的linux服务器, 以ubuntu16.04 为例
+假设拥有一台带nvidia显卡的linux服务器, 以ubuntu18.04 为例
 
 !!! 注意
     如果apt update 或 apt install 速度缓慢,可以考虑更换软件源
diff --git a/mkdocs.yml b/mkdocs.yml
index 8f2ea06..1592a16 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -23,6 +23,7 @@ markdown_extensions:
 nav:
   - Home: index.md
   - 基本概念:
+    - overview/introduction.md
     - overview/framework.md
     - overview/dataset-format.md
     - overview/hyper-parameter.md
@@ -34,6 +35,7 @@
     - image_segmentation/simple_semantic_seg_mining.md
     - image_segmentation/test_semantic_seg.md
     - image_segmentation/simple_instance_seg_tmi.md
+  - 快速定制: fast_custom
   - 镜像社区:
     - image_community/image_community.md
     - image_community/seg-mmseg-tmi.md
@@ -46,3 +48,4 @@
     - image_community/det-yolov5-automl-tmi.md
     - image_community/det-yolov4-tmi.md
   - 算法仓库: algorithms
+  - 设计文档: design_doc

From 5bb5b0358eb5014708a5efaa30cc566880aa6179 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 2 Mar 2023 15:41:28 +0800
Subject: [PATCH 201/204] update demo readme

---
 det-demo-tmi/README.md          |  10 ++-
 docs/algorithms/yolov5.md       | 113 ++++++++++++++++++++++++++++++++
 docs/overview/dataset-format.md |  14 +++-
 3 files changed, 133 insertions(+), 4 deletions(-)

diff --git a/det-demo-tmi/README.md b/det-demo-tmi/README.md
index abccece..36853ec 100644
--- a/det-demo-tmi/README.md
+++ b/det-demo-tmi/README.md
@@ -1,5 +1,13 @@
 # ymir 用户自定义镜像制作指南
 
+!!! 最新文档参考 https://ymir-executor-fork.readthedocs.io/zh/latest/object_detection/simple_det_training/
+
+此处文档为ymir1.3.0时编写,现ymir最新版为ymir2.1.0, 相应代码中的接口也已更新到ymir2.1.0,需要安装相应版本的sdk。
+
+```
+pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir2.1.0"
+```
+
 ## 目的
 
 此文档面向以下人员:
@@ -137,7 +145,7 @@ ymir 通过 mir train / mir mining / mir infer 命令启动镜像,遵循以下
 2. 镜像框架相关的所有内容都在 `ymir_exc` 包中,包括以下部分:
 
-   安装方式 `pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0"`, 注意通过 ~~`pip install ymir_exc`~~ 的方式安装的版本不具有 `ymir_exc.util` 包。前者在后者的代码基础上进行了扩展,提供了更多的功能(如 `ymir_exc.util`)。
+   安装方式 `pip install "git+https://github.com/modelai/ymir-executor-sdk.git@ymir2.1.0"`, 注意通过 ~~`pip install ymir_exc`~~ 方式安装的 pypi 版本不具有 `ymir_exc.util` 包;git 安装的版本在 pypi 版本的代码基础上进行了扩展,提供了更多的功能(如 `ymir_exc.util`)。
 
 * `env`:环境,提供任务类型,任务 id 等信息
 
diff --git a/docs/algorithms/yolov5.md b/docs/algorithms/yolov5.md
index e69de29..19bcea0 100644
--- a/docs/algorithms/yolov5.md
+++ b/docs/algorithms/yolov5.md
@@ -0,0 +1,113 @@
+# yolov5 代码库简介
+
+## 安装
+
+```
+git clone https://github.com/ultralytics/yolov5  # clone
+cd yolov5
+pip install -r requirements.txt  # install
+```
+
+## 训练
+
+```
+python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
+                                                                 yolov5s                  64
+                                                                 yolov5m                  40
+                                                                 yolov5l                  24
+                                                                 yolov5x                  16
+```
+
+## 推理
+
+```
+python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                               img.jpg                        # image
+                                               vid.mp4                        # video
+                                               screen                         # screenshot
+                                               path/                          # directory
+                                               list.txt                       # list of images
+                                               list.streams                   # list of streams
+                                               'path/*.jpg'                   # glob
+                                               'https://youtu.be/Zgi9g1ksQHc' # YouTube
+                                               'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
+```
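除命令行方式外,yolov5 也支持在 Python 中通过 PyTorch Hub 直接加载模型并推理(示意,首次运行需联网下载代码与权重):

```python
import torch

# 从 PyTorch Hub 加载预训练的 yolov5s 模型
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# 输入可以是图片路径、URL、PIL/numpy 图像等
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # 打印检测结果概要
```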
+## 数据集
+
+参考[yolov5自定义数据集](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#11-create-datasetyaml)
+
+- 数据集配置文件 `dataset.yaml`
+
+yolov5通过读取yaml配置文件,获得数据集的以下信息:
+
+  - path: 数据集的根目录
+
+  - train: 训练集划分,可以是一个目录,也可以是一个索引文件,或者是一个列表
+
+  - val: 验证集划分
+
+  - test: 测试集划分
+
+  - names: 数据集的类别信息
+
+```
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/coco128  # dataset root dir
+train: images/train2017  # train images (relative to 'path') 128 images
+val: images/train2017  # val images (relative to 'path') 128 images
+test:  # test images (optional)
+
+# Classes (80 COCO classes)
+names:
+  0: person
+  1: bicycle
+  2: car
+  ...
+  77: teddy bear
+  78: hair drier
+  79: toothbrush
+
+```
+
+- 数据集划分索引文件
+
+每行均为图像文件的路径,示例如下:
+```
+coco128/images/im0.jpg
+coco128/images/im1.jpg
+coco128/images/im2.jpg
+```
+
+- 标注文件
+
+  - 标注文件的路径通过图像文件的路径替换得到,会将其中的 `/images/` 替换为 `/labels/`, 文件后缀替换为 `.txt`, 具体代码如下:
+
+  ```
+  import os
+
+  def img2label_paths(img_paths):
+      # Define label paths as a function of image paths
+      sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}'  # /images/, /labels/ substrings
+      return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
+  ```
+
+  - 标注文件采用txt格式, 每行为一个标注框,采用 `class_id x_center y_center width height` 的格式, 以空格进行分隔。
+
+  ![](../imgs/yolov5_ann_format.jpg)
+
+  - `class_id`: 表示标注框所属类别的整数,从0开始计数
+
+  - `x_center`: 归一化后标注框的中心 x 坐标,浮点数,取值范围为[0, 1]
+
+  - `y_center`: 归一化后标注框的中心 y 坐标,浮点数,取值范围为[0, 1]
+
+  - `width`: 归一化后的标注框宽度,浮点数,取值范围为[0, 1]
+
+  - `height`: 归一化后的标注框高度,浮点数,取值范围为[0, 1]
+
+  - 标注文件内容示例如下:
+
+  ```
+  0 0.48 0.63 0.69 0.71
+  0 0.74 0.52 0.31 0.93
+  4 0.36 0.79 0.07 0.40
+  ```
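上面的归一化方式可以用一小段 Python 演示(图像宽高与标注框数值均为举例):

```python
def xyxy_to_yolov5(class_id: int, xmin: float, ymin: float,
                   xmax: float, ymax: float, img_w: int, img_h: int) -> str:
    """将像素坐标 (xmin, ymin, xmax, ymax) 转换为 yolov5 的归一化标注行"""
    x_center = (xmin + xmax) / 2 / img_w
    y_center = (ymin + ymax) / 2 / img_h
    width = (xmax - xmin) / img_w
    height = (ymax - ymin) / img_h
    return f'{class_id} {x_center:.2f} {y_center:.2f} {width:.2f} {height:.2f}'


# 假设图像宽 500、高 375, 标注框左上角为 (242, 61), 右下角为 (424, 249)
print(xyxy_to_yolov5(0, 242, 61, 424, 249, 500, 375))  # 输出: 0 0.67 0.41 0.36 0.50
```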
diff --git a/docs/overview/dataset-format.md b/docs/overview/dataset-format.md
index d52adc8..4c302fa 100644
--- a/docs/overview/dataset-format.md
+++ b/docs/overview/dataset-format.md
@@ -45,16 +45,24 @@
 /in/assets/56/56f3af57d381154d377ad92a99b53e4d12de6456.jpg /in/annotations/56/56f3af57d381154d377ad92a99b53e4d12de6456.txt
 ```
-- txt文件每行的格式为 `class_id, xmin, ymin, xmax, ymax, ann_quality, bbox_angle`
+这个索引文件采用文本文件格式,每行包含一个图像的`绝对路径`及对应标注的`绝对路径`,以制表符 `\t` 进行分隔。
+
+- 标注txt文件每行的格式为 `class_id, xmin, ymin, xmax, ymax, ann_quality, bbox_angle`, 以英文逗号 `,` 进行分隔。
+
+  - `class_id`: 表示标注框所属类别的整数,从0开始计数
+
+  - `xmin, ymin, xmax, ymax`: 表示标注框左上角和右下角的整数坐标值,以像素为单位。
+
+  - `ann_quality`:表示标注质量的浮点数,默认为-1.0
+
+  - `bbox_angle`: 表示标注框旋转角度的浮点数,以[弧度RAD](https://baike.baidu.com/item/RAD/2262445)为单位,默认为0.0
 
-其中 `class_id, xmin, ymin, xmax, ymax` 均为整数,而标注质量`ann_quality`为浮点数,默认为-1.0, 标注框旋转角度`bbox_angle`为浮点数,单位为[RAD](https://baike.baidu.com/item/RAD/2262445)
 ```
 0, 242, 61, 424, 249, -1.0, 0.0
 1, 211, 147, 325, 255, -1.0, 0.0
 1, 122, 7, 372, 375, -1.0, 0.0
 ```
-
 ## det-voc:raw
 
 也可写为 voc:raw, 为目标检测格式

From 6a91c6d9a6466c66c4f0a75f13c400cab16bf147 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 2 Mar 2023 15:42:34 +0800
Subject: [PATCH 202/204] update sdk to ymir2.1.0

---
 det-demo-tmi/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/det-demo-tmi/requirements.txt b/det-demo-tmi/requirements.txt
index 6719696..cadfd37 100644
--- a/det-demo-tmi/requirements.txt
+++ b/det-demo-tmi/requirements.txt
@@ -2,4 +2,4 @@ pydantic>=1.8.2
 pyyaml>=5.4.1
 tensorboardX>=2.4
 packaging>=23.0
-ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir1.3.0
+ymir_exc@git+https://github.com/modelai/ymir-executor-sdk.git@ymir2.1.0

From 7c947447957e74bcfba85bb63b706b948fc95b58 Mon Sep 17 00:00:00 2001
From: youdaoyzbx
Date: Thu, 2 Mar 2023 17:29:30 +0800
Subject: [PATCH 203/204] update doc

---
 README.md       | 34 +++++++++++++++++++++++-----------
 README_zh-CN.md | 33 ++++++++++++++++++++++-----------
 2 files changed, 45 insertions(+), 22 deletions(-)

diff --git a/README.md b/README.md
index 488e143..5032783 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,12 @@
 # ymir-executor documentation
 [English](./README.md) | [简体中文](./README_zh-CN.md)
-- [ymir](https://github.com/IndustryEssentials/ymir)
+- 🏠 [ymir](https://github.com/IndustryEssentials/ymir)
 
-  - [bilibili: video tutorial](https://b23.tv/KS5b5oF)
+- 📺 [video tutorial](https://b23.tv/KS5b5oF)
 
-- [Image Community](http://pubimg.vesionbook.com:8110/img) search and share open source.
+- 👨‍👩‍👧‍👧 [Image Community](http://pubimg.vesionbook.com:8110/img) where you can search for and share open-source images.
+
+- 📘 [Documentation](https://ymir-executor-fork.readthedocs.io/zh/latest/#)
 
 ## overview
 
@@ -57,12 +57,24 @@ gpu: single GeForce GTX 1080 Ti
 - [import and finetune model](https://github.com/modelai/ymir-executor-fork/wiki/import-and-finetune-model)
 
 - [import pretrained model weights](https://github.com/IndustryEssentials/ymir/blob/master/dev_docs/import-extra-models.md)
+
 ## reference
 
-- [yolov4](https://github.com/AlexeyAB/darknet)
-- [yolov5](https://github.com/ultralytics/yolov5)
-- [mmdetection](https://github.com/open-mmlab/mmdetection)
-- [yolov7](https://github.com/wongkinyiu/yolov7)
-- [detectron2](https://github.com/facebookresearch/detectron2)
-- [vidt](https://github.com/naver-ai/vidt)
-- [nanodet](https://github.com/RangiLyu/nanodet)
+### object detection
+- [ymir-yolov5](https://github.com/modelai/ymir-yolov5)
+- [ymir-yolov7](https://github.com/modelai/ymir-yolov7)
+- [ymir-nanodet](https://github.com/modelai/ymir-nanodet)
+- [ymir-mmyolo](https://github.com/modelai/ymir-mmyolo)
+- [ymir-vidt](https://github.com/modelai/ymir-vidt)
+- [ymir-detectron2](https://github.com/modelai/ymir-detectron2)
+
+### semantic segmentation
+- [ymir-mmsegmentation](https://github.com/modelai/ymir-mmsegmentation)
+
+### instance segmentation
+- [ymir-yolov5-seg](https://github.com/modelai/ymir-yolov5-seg)
+
+### resources
+- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) the ymir_exc package, which helps to develop your image
+- [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) test your ymir image
+- [ymir-flask](https://github.com/modelai/ymir-flask) deploy your model as a web service
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 8d8f08b..2a02159 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -1,12 +1,12 @@
 # ymir-executor 使用文档
 [English](./README.md) | [简体中文](./README_zh-CN.md)
-- [ymir](https://github.com/IndustryEssentials/ymir)
+- 🏠 [ymir](https://github.com/IndustryEssentials/ymir)
 
-  - [bilibili 视频教程](https://b23.tv/KS5b5oF)
+- 📺 [视频教程](https://b23.tv/KS5b5oF)
 
-- [镜像社区](http://pubimg.vesionbook.com:8110/img) 可搜索到所有公开的ymir算法镜像, 同时可共享其他人发布的镜像。
+- 👨‍👩‍👧‍👧 [镜像社区](http://pubimg.vesionbook.com:8110/img) 可搜索到所有公开的ymir算法镜像, 同时可共享其他人发布的镜像。
+
+- 📘 [文档](https://ymir-executor-fork.readthedocs.io/zh/latest/#)
 
 ## 比较
 
@@ -64,10 +64,21 @@ gpu: single GeForce GTX 1080 Ti
 
 ## 参考
 
-- [yolov4](https://github.com/AlexeyAB/darknet)
-- [yolov5](https://github.com/ultralytics/yolov5)
-- [mmdetection](https://github.com/open-mmlab/mmdetection)
-- [yolov7](https://github.com/wongkinyiu/yolov7)
-- [detectron2](https://github.com/facebookresearch/detectron2)
-- [vidt](https://github.com/naver-ai/vidt)
-- [nanodet](https://github.com/RangiLyu/nanodet)
+### 目标检测
+- [ymir-yolov5](https://github.com/modelai/ymir-yolov5)
+- [ymir-yolov7](https://github.com/modelai/ymir-yolov7)
+- [ymir-nanodet](https://github.com/modelai/ymir-nanodet)
+- [ymir-mmyolo](https://github.com/modelai/ymir-mmyolo)
+- [ymir-vidt](https://github.com/modelai/ymir-vidt)
+- [ymir-detectron2](https://github.com/modelai/ymir-detectron2)
+
+### 语义分割
+- [ymir-mmsegmentation](https://github.com/modelai/ymir-mmsegmentation)
+
+### 实例分割
+- [ymir-yolov5-seg](https://github.com/modelai/ymir-yolov5-seg)
+
+### 资源 +- [ymir-executor-sdk](https://github.com/modelai/ymir-executor-sdk) ymir_exc 包,辅助开发镜像 +- [ymir-executor-verifier](https://github.com/modelai/ymir-executor-verifier) 测试镜像工具 +- [ymir-flask](https://github.com/modelai/ymir-flask) 云端部署示例 From 077896d6701128815ac2bba84b7ccf0f7515317a Mon Sep 17 00:00:00 2001 From: youdaoyzbx Date: Fri, 3 Mar 2023 14:47:24 +0800 Subject: [PATCH 204/204] fix docker build error --- det-demo-tmi/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/det-demo-tmi/Dockerfile b/det-demo-tmi/Dockerfile index 0e4918c..dae7ae0 100644 --- a/det-demo-tmi/Dockerfile +++ b/det-demo-tmi/Dockerfile @@ -2,10 +2,11 @@ FROM python:3.8.13-alpine +RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.tuna.tsinghua.edu.cn/g' /etc/apk/repositories # Add bash RUN apk add bash # Required to build numpy wheel -RUN apk add g++ git +RUN apk add g++ git make COPY requirements.txt ./ RUN pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
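镜像构建成功后,可以在容器内用一小段 Python 快速验证 sdk 是否安装成功(示意,仅作冒烟测试):

```python
# 在基于上述 Dockerfile 构建的容器内运行
import ymir_exc

# 打印包的安装位置, 确认 requirements.txt 中的 ymir_exc 已正确安装
print(ymir_exc.__file__)
```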