diff --git a/_/T5-MDLCC.ipynb b/_/T5-MDLCC.ipynb
deleted file mode 100644
index ee2d87f..0000000
--- a/_/T5-MDLCC.ipynb
+++ /dev/null
@@ -1,4506 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Mateo de la Cuadra Copetta\n",
- "Github: mateodlcc \n",
- "n° de alumno: 21637407 "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "
\n",
- "[IIC2613] - Inteligencia Artificial\n",
- "
\n",
- " Tarea 5: Localización de Rostros
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Para el desarrollo de la tarea se utilizará YOLO V8. \n",
- "\n",
- "Para ello usaremos las siguientes librerías:\n",
- "\n",
- "| Librerías | Instalación |\n",
- "|----------- |:-----------:|\n",
- "|ultralytics |pip3 install ultralytics|\n",
- "| torch |pip uninstall torch && pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118|\n",
- "| PIL |pip install pillow|\n",
- "|os | - |\n",
- "|opencv (cv2) | pip install opencv-python|\n",
- "|mss | pip install mss|"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "import pandas as pd\n",
- "import random\n",
- "import shutil\n",
- "import os\n",
- "from ultralytics import YOLO\n",
- "from mss import mss\n",
- "import numpy as np\n",
- "import cv2\n",
- "import torch"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Preprocesamiento de datos"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Dataset transformado exitosamente\n",
- "Train: 1542\n",
- "Test: 441\n",
- "Validation: 221\n"
- ]
- }
- ],
- "source": [
- "# Leer el dataset\n",
- "df = pd.read_csv('faces.csv')\n",
- "\n",
- "# Crear la carpeta labels\n",
- "if not os.path.exists('labels'):\n",
- " os.makedirs('labels')\n",
- "\n",
- "# Crear los archivos .txt\n",
- "for image in df['image_name'].unique():\n",
- " with open(f'labels/{image.replace(\".jpg\", \".txt\")}', 'w') as file:\n",
- " for _, row in df[df['image_name'] == image].iterrows():\n",
- " x_center = (row['x0'] + row['x1']) / (2 * row['width'])\n",
- " y_center = (row['y0'] + row['y1']) / (2 * row['height'])\n",
- " width = (row['x1'] - row['x0']) / row['width']\n",
- " height = (row['y1'] - row['y0']) / row['height']\n",
- " file.write(f'0 {x_center} {y_center} {width} {height}\\n')\n",
- "\n",
- "# Obtener la lista de imagenes y mezclarla\n",
- "images = df['image_name'].unique()\n",
- "random.shuffle(images)\n",
- "\n",
- "# Separar la lista en train, test y validation (70%, 20%, 10%)\n",
- "train_images = images[:int(0.7 * len(images))]\n",
- "test_images = images[int(0.7 * len(images)):int(0.9 * len(images))]\n",
- "validation_images = images[int(0.9 * len(images)):]\n",
- "\n",
- "# Crear las carpetas\n",
- "# train\n",
- "if not os.path.exists('train'):\n",
- " os.makedirs('train')\n",
- "if not os.path.exists('train/images'):\n",
- " os.makedirs('train/images')\n",
- "if not os.path.exists('train/labels'):\n",
- " os.makedirs('train/labels')\n",
- "\n",
- "# test\n",
- "if not os.path.exists('test'):\n",
- " os.makedirs('test')\n",
- "if not os.path.exists('test/images'):\n",
- " os.makedirs('test/images')\n",
- "if not os.path.exists('test/labels'):\n",
- " os.makedirs('test/labels')\n",
- "\n",
- "# validation\n",
- "if not os.path.exists('validation'):\n",
- " os.makedirs('validation')\n",
- "if not os.path.exists('validation/images'):\n",
- " os.makedirs('validation/images')\n",
- "if not os.path.exists('validation/labels'):\n",
- " os.makedirs('validation/labels')\n",
- "\n",
- "# Copiar las imagenes a las carpetas\n",
- "for image in train_images:\n",
- " shutil.copy(f'images/{image}', 'train/images')\n",
- "for image in test_images:\n",
- " shutil.copy(f'images/{image}', 'test/images')\n",
- "for image in validation_images:\n",
- " shutil.copy(f'images/{image}', 'validation/images')\n",
- "\n",
- "# Copiar los labels a las carpetasn\n",
- "for image in train_images:\n",
- " shutil.copy(f'labels/{image.replace(\".jpg\", \".txt\")}', 'train/labels')\n",
- "for image in test_images:\n",
- " shutil.copy(f'labels/{image.replace(\".jpg\", \".txt\")}', 'test/labels')\n",
- "for image in validation_images:\n",
- " shutil.copy(f'labels/{image.replace(\".jpg\", \".txt\")}', 'validation/labels')\n",
- "\n",
- "# Mostrar el resultado\n",
- "print('Dataset transformado exitosamente')\n",
- "print(f'Train: {len(train_images)}')\n",
- "print(f'Test: {len(test_images)}')\n",
- "print(f'Validation: {len(validation_images)}')"
- ]
- },
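- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "As a quick sanity check (an illustrative sketch, not part of the original pipeline), the next cell re-reads one of the generated label files and verifies that it follows the YOLO format `class x_center y_center width height` with every value normalized to [0, 1]."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Sanity check (illustrative sketch): each label line should be\n",
- "# \"class x_center y_center width height\", all normalized to [0, 1].\n",
- "sample = train_images[0]\n",
- "label_path = f'train/labels/{sample.replace(\".jpg\", \".txt\")}'\n",
- "with open(label_path) as f:\n",
- "    for line in f:\n",
- "        cls, xc, yc, w, h = line.split()\n",
- "        assert cls == '0'\n",
- "        assert all(0.0 <= float(v) <= 1.0 for v in (xc, yc, w, h)), line\n",
- "print(f'{label_path}: OK')"
- ]
- },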
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Entrenamiento"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "CUDA está disponible. Los modelos usarán la GPU.\n",
- "cuda\n"
- ]
- }
- ],
- "source": [
- "if torch.cuda.is_available():\n",
- " print(\"CUDA está disponible. Los modelos usarán la GPU.\")\n",
- " device = torch.device(\"cuda\")\n",
- "else:\n",
- " print(\"CUDA no está disponible. Los modelos usarán la CPU.\")\n",
- " device = torch.device(\"cpu\")\n",
- "\n",
- "print(device)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A continuación tenemos los modelos disponibles de Yolo V8, nosotros usaremos YOLOv8n:\n",
- "| Model | Size (pixels) | mAPval50-95 | SpeedCPU ONNX(ms) | SpeedA100 TensorRT(ms) | Params(M) | FLOPs(B) |\n",
- "|-------|---------------|-------------|--------------------|------------------------|-----------|----------|\n",
- "| YOLOv8n | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |\n",
- "| YOLOv8s | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |\n",
- "| YOLOv8m | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |\n",
- "| YOLOv8l | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |\n",
- "| YOLOv8x | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Cargamos el modelo\n",
- "model = YOLO(\"yolov8n.pt\")\n",
- "model = model.to(device)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Establecemos el número de épocas que usaremos para el entrenamiento y el path actual (para establecer un path común)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [],
- "source": [
- "epochs = 100\n",
- "batch = 16\n",
- "PATH = os.path.join(os.getcwd(), \"data\")"
- ]
- },
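- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`model.train` below reads the dataset layout from `data.yaml`, which is not shown in this notebook. The next cell is a hedged sketch of what it might contain, assuming the `train/`, `validation/` and `test/` splits live under the `data/` directory that `PATH` points to and that the single class is named `Face` (as seen in the detection output further below)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Hedged sketch of data.yaml (the actual file is not included in the notebook).\n",
- "# Assumes the split folders live under PATH and a single class named 'Face'.\n",
- "data_yaml_sketch = '\\n'.join([\n",
- "    f'path: {PATH}',\n",
- "    'train: train/images',\n",
- "    'val: validation/images',\n",
- "    'test: test/images',\n",
- "    'nc: 1',\n",
- "    \"names: ['Face']\",\n",
- "])\n",
- "print(data_yaml_sketch)  # save as data/data.yaml if it does not exist yet"
- ]
- },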
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "New https://pypi.org/project/ultralytics/8.2.53 available Update with 'pip install -U ultralytics'\n",
- "\u001b[34m\u001b[1mengine\\trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=d:\\Trabajos_UC\\9 noveno semestre\\IA\\mateo\\data\\data.yaml, epochs=100, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=cuda:0, workers=8, project=None, name=train3, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, vid_stride=1, stream_buffer=False, line_width=None, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, tracker=botsort.yaml, save_dir=runs\\detect\\train3\n",
- "Overriding model.yaml nc=80 with nc=1\n",
- "\n",
- " from n params module arguments \n",
- " 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n",
- " 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n",
- " 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n",
- " 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n",
- " 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n",
- " 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n",
- " 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n",
- " 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n",
- " 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n",
- " 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n",
- " 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
- " 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
- " 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n",
- " 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
- " 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
- " 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n",
- " 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n",
- " 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
- " 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n",
- " 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n",
- " 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
- " 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n",
- " 22 [15, 18, 21] 1 751507 ultralytics.nn.modules.head.Detect [1, [64, 128, 256]] \n",
- "Model summary: 225 layers, 3011043 parameters, 3011027 gradients, 8.2 GFLOPs\n",
- "\n",
- "Transferred 319/355 items from pretrained weights\n",
- "Freezing layer 'model.22.dfl.conv.weight'\n",
- "\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks with YOLOv8n...\n",
- "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed \n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mScanning D:\\Trabajos_UC\\9 noveno semestre\\IA\\mateo\\data\\train\\labels.cache... 1542 images, 0 backgrounds, 0 corrupt: 100%|██████████| 1542/1542 [00:00, ?it/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mScanning D:\\Trabajos_UC\\9 noveno semestre\\IA\\mateo\\data\\validation\\labels.cache... 221 images, 0 backgrounds, 0 corrupt: 100%|██████████| 221/221 [00:00, ?it/s]\n",
- "Plotting labels to runs\\detect\\train3\\labels.jpg... \n",
- "\u001b[34m\u001b[1moptimizer:\u001b[0m 'optimizer=auto' found, ignoring 'lr0=0.01' and 'momentum=0.937' and determining best 'optimizer', 'lr0' and 'momentum' automatically... \n",
- "\u001b[34m\u001b[1moptimizer:\u001b[0m AdamW(lr=0.002, momentum=0.9) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias(decay=0.0)\n",
- "Image sizes 640 train, 640 val\n",
- "Using 8 dataloader workers\n",
- "Logging results to \u001b[1mruns\\detect\\train3\u001b[0m\n",
- "Starting training for 100 epochs...\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 1/100 2.11G 1.656 2.208 1.721 17 640: 100%|██████████| 97/97 [00:19<00:00, 5.05it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:03<00:00, 2.12it/s]\n",
- " all 221 310 0.613 0.623 0.634 0.268\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 2/100 2.21G 1.472 1.593 1.559 22 640: 100%|██████████| 97/97 [00:13<00:00, 7.20it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:03<00:00, 2.07it/s]\n",
- " all 221 310 0.644 0.755 0.691 0.359\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 3/100 2.21G 1.473 1.433 1.563 16 640: 100%|██████████| 97/97 [00:13<00:00, 7.17it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.29it/s]\n",
- " all 221 310 0.627 0.764 0.709 0.343\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 4/100 2.21G 1.484 1.375 1.586 17 640: 100%|██████████| 97/97 [00:13<00:00, 7.30it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.69it/s]\n",
- " all 221 310 0.708 0.81 0.792 0.437\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 5/100 2.21G 1.421 1.228 1.538 14 640: 100%|██████████| 97/97 [00:18<00:00, 5.25it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.56it/s]\n",
- " all 221 310 0.652 0.697 0.702 0.379\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 6/100 2.21G 1.353 1.157 1.501 26 640: 100%|██████████| 97/97 [00:18<00:00, 5.35it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.63it/s]\n",
- " all 221 310 0.818 0.829 0.894 0.508\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 7/100 2.22G 1.348 1.13 1.489 16 640: 100%|██████████| 97/97 [00:20<00:00, 4.66it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.64it/s]\n",
- " all 221 310 0.808 0.861 0.892 0.519\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 8/100 2.21G 1.324 1.101 1.481 13 640: 100%|██████████| 97/97 [00:17<00:00, 5.59it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:03<00:00, 1.87it/s]\n",
- " all 221 310 0.769 0.852 0.866 0.492\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 9/100 2.21G 1.31 1.076 1.475 36 640: 100%|██████████| 97/97 [00:21<00:00, 4.43it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.31it/s]\n",
- " all 221 310 0.771 0.878 0.887 0.539\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 10/100 2.21G 1.283 1.028 1.444 17 640: 100%|██████████| 97/97 [00:18<00:00, 5.26it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.78it/s]\n",
- " all 221 310 0.761 0.865 0.856 0.51\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 11/100 2.22G 1.273 1.035 1.45 15 640: 100%|██████████| 97/97 [00:46<00:00, 2.08it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:07<00:00, 1.13s/it]\n",
- " all 221 310 0.791 0.842 0.884 0.545\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 12/100 2.22G 1.252 0.9898 1.426 12 640: 100%|██████████| 97/97 [00:17<00:00, 5.44it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.60it/s]\n",
- " all 221 310 0.799 0.897 0.9 0.583\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 13/100 2.21G 1.229 0.9731 1.424 16 640: 100%|██████████| 97/97 [00:18<00:00, 5.19it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.45it/s]\n",
- " all 221 310 0.817 0.935 0.907 0.583\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 14/100 2.21G 1.216 0.9429 1.401 13 640: 100%|██████████| 97/97 [00:18<00:00, 5.18it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:03<00:00, 1.98it/s]\n",
- " all 221 310 0.828 0.923 0.929 0.612\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 15/100 2.21G 1.225 0.9803 1.413 16 640: 100%|██████████| 97/97 [00:29<00:00, 3.33it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [01:09<00:00, 9.87s/it]\n",
- " all 221 310 0.865 0.91 0.945 0.601\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 16/100 2.21G 1.201 0.9447 1.398 15 640: 100%|██████████| 97/97 [00:53<00:00, 1.80it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.38it/s]\n",
- " all 221 310 0.852 0.884 0.917 0.572\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 17/100 2.21G 1.176 0.9384 1.382 18 640: 100%|██████████| 97/97 [00:22<00:00, 4.24it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.34it/s]\n",
- " all 221 310 0.839 0.919 0.928 0.616\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 18/100 2.21G 1.176 0.8984 1.384 13 640: 100%|██████████| 97/97 [00:20<00:00, 4.80it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.70it/s]\n",
- " all 221 310 0.856 0.903 0.928 0.59\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 19/100 2.21G 1.151 0.9042 1.361 18 640: 100%|██████████| 97/97 [00:18<00:00, 5.25it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.44it/s]\n",
- " all 221 310 0.829 0.903 0.904 0.622\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 20/100 2.21G 1.149 0.9042 1.358 14 640: 100%|██████████| 97/97 [01:08<00:00, 1.42it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.42it/s]\n",
- " all 221 310 0.782 0.942 0.886 0.592\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 21/100 2.21G 1.166 0.9213 1.387 27 640: 100%|██████████| 97/97 [00:20<00:00, 4.73it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.35it/s]\n",
- " all 221 310 0.833 0.868 0.911 0.581\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 22/100 2.21G 1.143 0.8802 1.345 18 640: 100%|██████████| 97/97 [00:21<00:00, 4.56it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.39it/s]\n",
- " all 221 310 0.919 0.871 0.94 0.631\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 23/100 2.21G 1.124 0.8668 1.337 25 640: 100%|██████████| 97/97 [00:18<00:00, 5.33it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:12<00:00, 1.76s/it]\n",
- " all 221 310 0.821 0.939 0.893 0.593\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 24/100 2.22G 1.104 0.8693 1.337 17 640: 100%|██████████| 97/97 [00:25<00:00, 3.74it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.40it/s]\n",
- " all 221 310 0.807 0.903 0.913 0.601\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 25/100 2.21G 1.11 0.8632 1.336 20 640: 100%|██████████| 97/97 [00:20<00:00, 4.67it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.36it/s]\n",
- " all 221 310 0.822 0.954 0.904 0.599\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 26/100 2.21G 1.128 0.8614 1.343 16 640: 100%|██████████| 97/97 [00:21<00:00, 4.43it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.21it/s]\n",
- " all 221 310 0.85 0.926 0.943 0.646\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 27/100 2.22G 1.092 0.8216 1.31 19 640: 100%|██████████| 97/97 [00:24<00:00, 3.99it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:08<00:00, 1.21s/it]\n",
- " all 221 310 0.826 0.932 0.926 0.628\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 28/100 2.21G 1.099 0.8512 1.323 26 640: 100%|██████████| 97/97 [00:25<00:00, 3.83it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.39it/s]\n",
- " all 221 310 0.852 0.913 0.927 0.638\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 29/100 2.21G 1.077 0.8344 1.313 12 640: 100%|██████████| 97/97 [00:22<00:00, 4.35it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.40it/s]\n",
- " all 221 310 0.854 0.888 0.918 0.609\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 30/100 2.21G 1.056 0.8161 1.304 21 640: 100%|██████████| 97/97 [00:23<00:00, 4.21it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:03<00:00, 1.81it/s]\n",
- " all 221 310 0.893 0.897 0.957 0.658\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 31/100 2.21G 1.063 0.795 1.301 11 640: 100%|██████████| 97/97 [00:25<00:00, 3.75it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:11<00:00, 1.62s/it]\n",
- " all 221 310 0.83 0.963 0.931 0.639\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 32/100 2.21G 1.051 0.8165 1.299 16 640: 100%|██████████| 97/97 [00:20<00:00, 4.80it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.40it/s]\n",
- " all 221 310 0.823 0.948 0.94 0.626\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 33/100 2.21G 1.051 0.7935 1.29 17 640: 100%|██████████| 97/97 [00:21<00:00, 4.54it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.28it/s]\n",
- " all 221 310 0.893 0.913 0.96 0.675\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 34/100 2.21G 1.041 0.8 1.295 19 640: 100%|██████████| 97/97 [00:21<00:00, 4.61it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:08<00:00, 1.16s/it]\n",
- " all 221 310 0.874 0.903 0.956 0.663\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 35/100 2.22G 1.025 0.7778 1.281 13 640: 100%|██████████| 97/97 [00:38<00:00, 2.54it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.36it/s]\n",
- " all 221 310 0.865 0.955 0.921 0.643\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 36/100 2.22G 1.024 0.7855 1.275 12 640: 100%|██████████| 97/97 [00:22<00:00, 4.33it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.36it/s]\n",
- " all 221 310 0.831 0.945 0.953 0.674\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 37/100 2.21G 1.017 0.7783 1.266 31 640: 100%|██████████| 97/97 [00:21<00:00, 4.50it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.43it/s]\n",
- " all 221 310 0.828 0.947 0.951 0.686\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 38/100 2.21G 1.011 0.7604 1.262 14 640: 100%|██████████| 97/97 [00:23<00:00, 4.21it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:14<00:00, 2.03s/it]\n",
- " all 221 310 0.888 0.929 0.95 0.671\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 39/100 2.22G 1.014 0.7596 1.264 19 640: 100%|██████████| 97/97 [00:32<00:00, 2.94it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:06<00:00, 1.13it/s]\n",
- " all 221 310 0.88 0.955 0.954 0.675\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 40/100 2.21G 1.005 0.7545 1.259 20 640: 100%|██████████| 97/97 [00:22<00:00, 4.30it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.37it/s]\n",
- " all 221 310 0.843 0.938 0.932 0.661\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 41/100 2.21G 1.021 0.7664 1.276 15 640: 100%|██████████| 97/97 [00:23<00:00, 4.17it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.43it/s]\n",
- " all 221 310 0.862 0.946 0.949 0.685\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 42/100 2.21G 0.9806 0.7312 1.253 22 640: 100%|██████████| 97/97 [00:28<00:00, 3.45it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.45it/s]\n",
- " all 221 310 0.899 0.961 0.964 0.691\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 43/100 2.21G 0.9875 0.7498 1.259 20 640: 100%|██████████| 97/97 [00:28<00:00, 3.41it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.31it/s]\n",
- " all 221 310 0.908 0.925 0.967 0.681\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 44/100 2.21G 1.015 0.7461 1.274 19 640: 100%|██████████| 97/97 [00:21<00:00, 4.46it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:05<00:00, 1.29it/s]\n",
- " all 221 310 0.882 0.963 0.965 0.68\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 45/100 2.21G 0.9845 0.7459 1.273 16 640: 100%|██████████| 97/97 [00:24<00:00, 4.03it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:04<00:00, 1.74it/s]\n",
- " all 221 310 0.912 0.929 0.965 0.695\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 46/100 2.21G 0.9871 0.7303 1.262 17 640: 100%|██████████| 97/97 [00:22<00:00, 4.37it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.57it/s]\n",
- " all 221 310 0.862 0.963 0.959 0.688\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 47/100 2.21G 0.9882 0.7382 1.265 24 640: 100%|██████████| 97/97 [00:13<00:00, 7.09it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.37it/s]\n",
- " all 221 310 0.897 0.928 0.956 0.676\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 48/100 2.21G 0.9703 0.7204 1.243 22 640: 100%|██████████| 97/97 [00:12<00:00, 7.77it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.11it/s]\n",
- " all 221 310 0.89 0.963 0.969 0.694\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 49/100 2.21G 0.976 0.7364 1.25 33 640: 100%|██████████| 97/97 [00:11<00:00, 8.42it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.16it/s]\n",
- " all 221 310 0.878 0.948 0.962 0.699\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 50/100 2.21G 0.9537 0.7302 1.238 23 640: 100%|██████████| 97/97 [00:10<00:00, 8.88it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.05it/s]\n",
- " all 221 310 0.827 0.942 0.953 0.677\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 51/100 2.22G 0.9489 0.7088 1.228 22 640: 100%|██████████| 97/97 [00:11<00:00, 8.80it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.66it/s]\n",
- " all 221 310 0.87 0.973 0.968 0.69\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 52/100 2.22G 0.9554 0.7096 1.238 20 640: 100%|██████████| 97/97 [00:10<00:00, 9.01it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.35it/s]\n",
- " all 221 310 0.899 0.939 0.968 0.69\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 53/100 2.21G 0.9377 0.691 1.217 21 640: 100%|██████████| 97/97 [00:11<00:00, 8.38it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.84it/s]\n",
- " all 221 310 0.895 0.93 0.96 0.692\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 54/100 2.21G 0.9424 0.7009 1.224 17 640: 100%|██████████| 97/97 [00:11<00:00, 8.19it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.77it/s]\n",
- " all 221 310 0.872 0.948 0.95 0.678\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 55/100 2.21G 0.9573 0.7155 1.23 25 640: 100%|██████████| 97/97 [00:09<00:00, 9.96it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.70it/s]\n",
- " all 221 310 0.859 0.923 0.951 0.682\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 56/100 2.21G 0.9238 0.7067 1.212 23 640: 100%|██████████| 97/97 [00:11<00:00, 8.42it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.13it/s]\n",
- " all 221 310 0.871 0.932 0.954 0.687\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 57/100 2.21G 0.919 0.6929 1.208 8 640: 100%|██████████| 97/97 [00:10<00:00, 9.06it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.51it/s]\n",
- " all 221 310 0.905 0.958 0.96 0.703\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 58/100 2.21G 0.9244 0.6805 1.205 11 640: 100%|██████████| 97/97 [00:10<00:00, 9.41it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.68it/s]\n",
- " all 221 310 0.861 0.958 0.95 0.689\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 59/100 2.22G 0.9146 0.6795 1.204 15 640: 100%|██████████| 97/97 [00:10<00:00, 9.23it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.96it/s]\n",
- " all 221 310 0.883 0.922 0.956 0.68\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 60/100 2.22G 0.9028 0.6688 1.192 18 640: 100%|██████████| 97/97 [00:12<00:00, 7.57it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.09it/s]\n",
- " all 221 310 0.907 0.916 0.971 0.704\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 61/100 2.21G 0.9073 0.6615 1.205 21 640: 100%|██████████| 97/97 [00:11<00:00, 8.63it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.05it/s]\n",
- " all 221 310 0.9 0.956 0.972 0.71\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 62/100 2.21G 0.8899 0.6707 1.19 18 640: 100%|██████████| 97/97 [00:11<00:00, 8.65it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.14it/s]\n",
- " all 221 310 0.85 0.972 0.963 0.693\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 63/100 2.21G 0.8941 0.6565 1.201 38 640: 100%|██████████| 97/97 [00:10<00:00, 9.16it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.72it/s]\n",
- " all 221 310 0.869 0.967 0.965 0.713\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 64/100 2.21G 0.8797 0.6502 1.187 17 640: 100%|██████████| 97/97 [00:12<00:00, 7.77it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.66it/s]\n",
- " all 221 310 0.88 0.97 0.962 0.699\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 65/100 2.12G 0.893 0.6666 1.195 28 640: 100%|██████████| 97/97 [00:13<00:00, 7.41it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.52it/s]\n",
- " all 221 310 0.881 0.954 0.963 0.699\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 66/100 2.21G 0.8721 0.6553 1.188 16 640: 100%|██████████| 97/97 [00:12<00:00, 7.78it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.59it/s]\n",
- " all 221 310 0.891 0.972 0.975 0.718\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 67/100 2.21G 0.8607 0.6384 1.167 17 640: 100%|██████████| 97/97 [00:12<00:00, 7.61it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.50it/s]\n",
- " all 221 310 0.894 0.942 0.965 0.698\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 68/100 2.21G 0.866 0.6454 1.179 20 640: 100%|██████████| 97/97 [00:13<00:00, 7.15it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.55it/s]\n",
- " all 221 310 0.892 0.956 0.972 0.72\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 69/100 2.21G 0.8626 0.6276 1.164 12 640: 100%|██████████| 97/97 [00:13<00:00, 7.35it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.19it/s]\n",
- " all 221 310 0.901 0.968 0.97 0.696\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 70/100 2.21G 0.8613 0.6194 1.165 15 640: 100%|██████████| 97/97 [00:13<00:00, 7.27it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.26it/s]\n",
- " all 221 310 0.877 0.943 0.962 0.704\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 71/100 2.21G 0.8618 0.6367 1.177 13 640: 100%|██████████| 97/97 [00:10<00:00, 9.16it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.67it/s]\n",
- " all 221 310 0.89 0.963 0.962 0.702\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 72/100 2.21G 0.8357 0.62 1.148 12 640: 100%|██████████| 97/97 [00:10<00:00, 9.11it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.91it/s]\n",
- " all 221 310 0.906 0.935 0.965 0.707\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 73/100 2.21G 0.8362 0.6197 1.143 14 640: 100%|██████████| 97/97 [00:11<00:00, 8.64it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.24it/s]\n",
- " all 221 310 0.892 0.93 0.969 0.706\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 74/100 2.21G 0.8308 0.6145 1.148 24 640: 100%|██████████| 97/97 [00:10<00:00, 8.83it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.47it/s]\n",
- " all 221 310 0.862 0.974 0.966 0.703\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 75/100 2.21G 0.8323 0.6167 1.154 22 640: 100%|██████████| 97/97 [00:11<00:00, 8.79it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.29it/s]\n",
- " all 221 310 0.903 0.965 0.97 0.698\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 76/100 2.21G 0.8266 0.6022 1.153 18 640: 100%|██████████| 97/97 [00:10<00:00, 8.95it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.42it/s]\n",
- " all 221 310 0.908 0.939 0.968 0.714\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 77/100 2.21G 0.812 0.5984 1.134 15 640: 100%|██████████| 97/97 [00:10<00:00, 8.85it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.41it/s]\n",
- " all 221 310 0.912 0.942 0.972 0.719\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 78/100 2.21G 0.7966 0.5909 1.128 12 640: 100%|██████████| 97/97 [00:11<00:00, 8.70it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.20it/s]\n",
- " all 221 310 0.908 0.948 0.976 0.723\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 79/100 2.22G 0.8104 0.6037 1.131 18 640: 100%|██████████| 97/97 [00:10<00:00, 9.16it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.72it/s]\n",
- " all 221 310 0.891 0.972 0.97 0.712\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 80/100 2.21G 0.8107 0.6026 1.136 10 640: 100%|██████████| 97/97 [00:10<00:00, 8.91it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.62it/s]\n",
- " all 221 310 0.919 0.914 0.966 0.707\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 81/100 2.21G 0.816 0.5962 1.146 20 640: 100%|██████████| 97/97 [00:10<00:00, 9.50it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.52it/s]\n",
- " all 221 310 0.916 0.919 0.968 0.713\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 82/100 2.21G 0.7966 0.593 1.123 18 640: 100%|██████████| 97/97 [00:11<00:00, 8.61it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.55it/s]\n",
- " all 221 310 0.882 0.966 0.967 0.718\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 83/100 2.21G 0.8121 0.5974 1.142 19 640: 100%|██████████| 97/97 [00:09<00:00, 9.88it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.90it/s]\n",
- " all 221 310 0.897 0.95 0.963 0.712\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 84/100 2.21G 0.7905 0.5826 1.135 15 640: 100%|██████████| 97/97 [00:10<00:00, 9.60it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.34it/s]\n",
- " all 221 310 0.93 0.932 0.973 0.727\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 85/100 2.21G 0.7959 0.5884 1.131 9 640: 100%|██████████| 97/97 [00:11<00:00, 8.44it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.03it/s]\n",
- " all 221 310 0.89 0.965 0.97 0.72\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 86/100 2.21G 0.7894 0.5805 1.126 10 640: 100%|██████████| 97/97 [00:12<00:00, 7.67it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.35it/s]\n",
- " all 221 310 0.889 0.968 0.977 0.729\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 87/100 2.21G 0.7853 0.5705 1.12 26 640: 100%|██████████| 97/97 [00:13<00:00, 7.42it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.74it/s]\n",
- " all 221 310 0.891 0.952 0.975 0.724\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 88/100 2.21G 0.7912 0.5672 1.133 13 640: 100%|██████████| 97/97 [00:13<00:00, 6.93it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.45it/s]\n",
- " all 221 310 0.892 0.956 0.968 0.711\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 89/100 2.21G 0.7777 0.571 1.123 9 640: 100%|██████████| 97/97 [00:12<00:00, 8.08it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.82it/s]\n",
- " all 221 310 0.896 0.949 0.976 0.726\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 90/100 2.21G 0.777 0.556 1.12 12 640: 100%|██████████| 97/97 [00:12<00:00, 7.61it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 3.45it/s]\n",
- " all 221 310 0.893 0.964 0.971 0.724\n",
- "Closing dataloader mosaic\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 91/100 2.21G 0.6943 0.435 1.068 11 640: 100%|██████████| 97/97 [00:14<00:00, 6.50it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.14it/s]\n",
- " all 221 310 0.892 0.961 0.972 0.716\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 92/100 2.21G 0.6728 0.4002 1.057 10 640: 100%|██████████| 97/97 [00:10<00:00, 9.30it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.57it/s]\n",
- " all 221 310 0.92 0.952 0.977 0.727\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 93/100 2.21G 0.6581 0.384 1.051 8 640: 100%|██████████| 97/97 [00:09<00:00, 9.89it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.45it/s]\n",
- " all 221 310 0.915 0.967 0.977 0.73\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 94/100 2.21G 0.6479 0.3894 1.039 7 640: 100%|██████████| 97/97 [00:10<00:00, 9.47it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.32it/s]\n",
- " all 221 310 0.909 0.968 0.974 0.731\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 95/100 2.21G 0.6354 0.3767 1.029 14 640: 100%|██████████| 97/97 [00:13<00:00, 7.16it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:02<00:00, 2.49it/s]\n",
- " all 221 310 0.925 0.958 0.978 0.737\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 96/100 2.21G 0.6329 0.372 1.027 11 640: 100%|██████████| 97/97 [00:15<00:00, 6.24it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.35it/s]\n",
- " all 221 310 0.922 0.955 0.977 0.734\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 97/100 2.21G 0.6306 0.368 1.027 8 640: 100%|██████████| 97/97 [00:10<00:00, 9.16it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.34it/s]\n",
- " all 221 310 0.912 0.965 0.978 0.737\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 98/100 2.21G 0.6135 0.3665 1.017 7 640: 100%|██████████| 97/97 [00:10<00:00, 9.11it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 4.36it/s]\n",
- " all 221 310 0.909 0.97 0.977 0.736\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 99/100 2.22G 0.6068 0.3572 1.014 12 640: 100%|██████████| 97/97 [00:16<00:00, 5.82it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.72it/s]\n",
- " all 221 310 0.914 0.961 0.976 0.73\n",
- "\n",
- " Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
- " 100/100 2.21G 0.6131 0.363 1.017 7 640: 100%|██████████| 97/97 [00:18<00:00, 5.21it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:01<00:00, 3.78it/s]\n",
- " all 221 310 0.907 0.958 0.977 0.738\n",
- "\n",
- "100 epochs completed in 0.637 hours.\n",
- "Optimizer stripped from runs\\detect\\train3\\weights\\last.pt, 6.2MB\n",
- "Optimizer stripped from runs\\detect\\train3\\weights\\best.pt, 6.2MB\n",
- "\n",
- "Validating runs\\detect\\train3\\weights\\best.pt...\n",
- "Ultralytics YOLOv8.0.208 Python-3.10.4 torch-2.1.0+cu121 CUDA:0 (NVIDIA GeForce RTX 3080, 10239MiB)\n",
- "Model summary (fused): 168 layers, 3005843 parameters, 0 gradients, 8.1 GFLOPs\n",
- " Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 7/7 [00:03<00:00, 2.25it/s]\n",
- " all 221 310 0.907 0.958 0.977 0.739\n",
- "Speed: 0.8ms preprocess, 1.3ms inference, 0.0ms loss, 2.4ms postprocess per image\n",
- "Results saved to \u001b[1mruns\\detect\\train3\u001b[0m\n"
- ]
- }
- ],
- "source": [
- "# Entrenamos el modelo\n",
- "data_path = os.path.join(PATH, \"data.yaml\")\n",
- "results = model.train(data=data_path, epochs=epochs, batch=batch)"
- ]
- },
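- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`model.train` only validates on the `val` split, so the 20% test split created during preprocessing is not used above. As a hedged sketch (assuming `data.yaml` declares a `test:` entry and that the run directory is `runs/detect/train3`, as in the training log), the held-out test set could be evaluated like this:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Hedged sketch: evaluate the best checkpoint on the held-out test split.\n",
- "# Assumes data.yaml has a 'test:' entry and the run folder matches the log above.\n",
- "best = YOLO(os.path.join('runs', 'detect', 'train3', 'weights', 'best.pt'))\n",
- "test_metrics = best.val(data=data_path, split='test')\n",
- "print(f'test mAP50: {test_metrics.box.map50:.3f}, mAP50-95: {test_metrics.box.map:.3f}')"
- ]
- },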
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Test del modelo\n",
- "\n",
- "Se usara el modelo entrenado 'best.pt' para evaluar el modelo caprutando la pantalla en tiempo real"
- ]
- },
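- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A minimal sketch of such a screen-capture loop is shown next (not the original cell): it grabs frames with `mss`, runs the trained detector and displays the annotated frames with OpenCV. The weights path, monitor index, confidence threshold and the `q`-to-quit key are illustrative assumptions."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Illustrative sketch of real-time face detection on the screen.\n",
- "# Assumptions: weights at 'best.pt', primary monitor, conf=0.5, quit with 'q'.\n",
- "best = YOLO('best.pt')\n",
- "with mss() as sct:\n",
- "    monitor = sct.monitors[1]  # primary monitor\n",
- "    while True:\n",
- "        frame = np.array(sct.grab(monitor))            # BGRA screenshot\n",
- "        frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)\n",
- "        results = best(frame, conf=0.5)\n",
- "        annotated = results[0].plot()                  # draw the detected boxes\n",
- "        cv2.imshow('YOLOv8 faces', annotated)\n",
- "        if cv2.waitKey(1) & 0xFF == ord('q'):\n",
- "            break\n",
- "cv2.destroyAllWindows()"
- ]
- },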
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "WARNING best2.pt appears to require 'dill', which is not in ultralytics requirements.\n",
- "AutoInstall will run now for 'dill' but this feature will be removed in the future.\n",
- "Recommend fixes are to train a new model using the latest 'ultralytics' package or to run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\u001b[31m\u001b[1mrequirements:\u001b[0m Ultralytics requirement ['dill'] not found, attempting AutoUpdate...\n",
- "Collecting dill\n",
- " Downloading dill-0.3.8-py3-none-any.whl.metadata (10 kB)\n",
- "Downloading dill-0.3.8-py3-none-any.whl (116 kB)\n",
- " ---------------------------------------- 116.3/116.3 kB 6.6 MB/s eta 0:00:00\n",
- "Installing collected packages: dill\n",
- "Successfully installed dill-0.3.8\n",
- "\n",
- "\u001b[31m\u001b[1mrequirements:\u001b[0m AutoUpdate success 8.1s, installed 1 package: ['dill']\n",
- "\u001b[31m\u001b[1mrequirements:\u001b[0m \u001b[1mRestart runtime or rerun command for updates to take effect\u001b[0m\n",
- "\n",
- "\n",
- "0: 480x640 (no detections), 621.2ms\n",
- "Speed: 20.2ms preprocess, 621.2ms inference, 13.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 525.7ms\n",
- "Speed: 9.8ms preprocess, 525.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 420.5ms\n",
- "Speed: 10.6ms preprocess, 420.5ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 445.7ms\n",
- "Speed: 6.6ms preprocess, 445.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 648.4ms\n",
- "Speed: 9.7ms preprocess, 648.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 440.7ms\n",
- "Speed: 5.6ms preprocess, 440.7ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 801.3ms\n",
- "Speed: 11.6ms preprocess, 801.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 712.4ms\n",
- "Speed: 5.0ms preprocess, 712.4ms inference, 3.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 403.9ms\n",
- "Speed: 22.2ms preprocess, 403.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 541.8ms\n",
- "Speed: 10.7ms preprocess, 541.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 482.6ms\n",
- "Speed: 6.6ms preprocess, 482.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 418.5ms\n",
- "Speed: 13.2ms preprocess, 418.5ms inference, 0.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 417.1ms\n",
- "Speed: 16.1ms preprocess, 417.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 296.1ms\n",
- "Speed: 6.0ms preprocess, 296.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 320.6ms\n",
- "Speed: 9.6ms preprocess, 320.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 900.8ms\n",
- "Speed: 6.1ms preprocess, 900.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 618.6ms\n",
- "Speed: 5.6ms preprocess, 618.6ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 273.5ms\n",
- "Speed: 5.7ms preprocess, 273.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 245.2ms\n",
- "Speed: 5.2ms preprocess, 245.2ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 315.2ms\n",
- "Speed: 5.6ms preprocess, 315.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 232.9ms\n",
- "Speed: 6.0ms preprocess, 232.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 231.6ms\n",
- "Speed: 5.6ms preprocess, 231.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 269.5ms\n",
- "Speed: 4.6ms preprocess, 269.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 235.9ms\n",
- "Speed: 4.6ms preprocess, 235.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 213.4ms\n",
- "Speed: 4.6ms preprocess, 213.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 216.6ms\n",
- "Speed: 3.0ms preprocess, 216.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 224.3ms\n",
- "Speed: 4.5ms preprocess, 224.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 214.2ms\n",
- "Speed: 3.0ms preprocess, 214.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 211.1ms\n",
- "Speed: 4.6ms preprocess, 211.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 249.5ms\n",
- "Speed: 6.7ms preprocess, 249.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 212.4ms\n",
- "Speed: 4.6ms preprocess, 212.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 218.9ms\n",
- "Speed: 5.0ms preprocess, 218.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 219.0ms\n",
- "Speed: 4.0ms preprocess, 219.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 221.7ms\n",
- "Speed: 5.7ms preprocess, 221.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 213.5ms\n",
- "Speed: 4.0ms preprocess, 213.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 221.5ms\n",
- "Speed: 5.0ms preprocess, 221.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 231.0ms\n",
- "Speed: 8.9ms preprocess, 231.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 256.2ms\n",
- "Speed: 5.6ms preprocess, 256.2ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 214.7ms\n",
- "Speed: 4.6ms preprocess, 214.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 222.5ms\n",
- "Speed: 4.0ms preprocess, 222.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 204.9ms\n",
- "Speed: 5.6ms preprocess, 204.9ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 240.8ms\n",
- "Speed: 3.0ms preprocess, 240.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 292.7ms\n",
- "Speed: 4.0ms preprocess, 292.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 265.7ms\n",
- "Speed: 5.1ms preprocess, 265.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 224.9ms\n",
- "Speed: 5.1ms preprocess, 224.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 242.9ms\n",
- "Speed: 6.2ms preprocess, 242.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 234.1ms\n",
- "Speed: 4.6ms preprocess, 234.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 236.4ms\n",
- "Speed: 5.1ms preprocess, 236.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 269.4ms\n",
- "Speed: 4.6ms preprocess, 269.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 231.6ms\n",
- "Speed: 5.0ms preprocess, 231.6ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 271.9ms\n",
- "Speed: 4.0ms preprocess, 271.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 387.9ms\n",
- "Speed: 4.0ms preprocess, 387.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 285.8ms\n",
- "Speed: 4.9ms preprocess, 285.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 225.9ms\n",
- "Speed: 4.0ms preprocess, 225.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 277.0ms\n",
- "Speed: 5.0ms preprocess, 277.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 232.5ms\n",
- "Speed: 14.2ms preprocess, 232.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 273.3ms\n",
- "Speed: 5.0ms preprocess, 273.3ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 237.2ms\n",
- "Speed: 4.6ms preprocess, 237.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 230.6ms\n",
- "Speed: 5.0ms preprocess, 230.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 223.3ms\n",
- "Speed: 4.0ms preprocess, 223.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 223.5ms\n",
- "Speed: 4.5ms preprocess, 223.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 230.3ms\n",
- "Speed: 5.0ms preprocess, 230.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 237.1ms\n",
- "Speed: 6.6ms preprocess, 237.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 282.6ms\n",
- "Speed: 4.6ms preprocess, 282.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 238.0ms\n",
- "Speed: 4.0ms preprocess, 238.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 228.4ms\n",
- "Speed: 6.0ms preprocess, 228.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 222.8ms\n",
- "Speed: 4.0ms preprocess, 222.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 230.4ms\n",
- "Speed: 6.0ms preprocess, 230.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 224.8ms\n",
- "Speed: 3.6ms preprocess, 224.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 225.8ms\n",
- "Speed: 5.6ms preprocess, 225.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 223.1ms\n",
- "Speed: 4.0ms preprocess, 223.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 386.9ms\n",
- "Speed: 3.6ms preprocess, 386.9ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 405.8ms\n",
- "Speed: 8.1ms preprocess, 405.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 285.2ms\n",
- "Speed: 10.3ms preprocess, 285.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 227.8ms\n",
- "Speed: 6.0ms preprocess, 227.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 225.0ms\n",
- "Speed: 4.0ms preprocess, 225.0ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 265.7ms\n",
- "Speed: 4.6ms preprocess, 265.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 282.4ms\n",
- "Speed: 10.6ms preprocess, 282.4ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 402.8ms\n",
- "Speed: 7.6ms preprocess, 402.8ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 329.8ms\n",
- "Speed: 4.0ms preprocess, 329.8ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 360.3ms\n",
- "Speed: 6.6ms preprocess, 360.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 424.7ms\n",
- "Speed: 9.6ms preprocess, 424.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 408.3ms\n",
- "Speed: 11.2ms preprocess, 408.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 313.3ms\n",
- "Speed: 8.6ms preprocess, 313.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 258.8ms\n",
- "Speed: 7.6ms preprocess, 258.8ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 268.1ms\n",
- "Speed: 6.0ms preprocess, 268.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 251.9ms\n",
- "Speed: 4.0ms preprocess, 251.9ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 489.9ms\n",
- "Speed: 4.7ms preprocess, 489.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 252.5ms\n",
- "Speed: 5.6ms preprocess, 252.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 249.1ms\n",
- "Speed: 5.6ms preprocess, 249.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 328.6ms\n",
- "Speed: 4.6ms preprocess, 328.6ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 231.8ms\n",
- "Speed: 5.1ms preprocess, 231.8ms inference, 21.8ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 254.5ms\n",
- "Speed: 5.6ms preprocess, 254.5ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 306.0ms\n",
- "Speed: 8.6ms preprocess, 306.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 298.1ms\n",
- "Speed: 4.0ms preprocess, 298.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 375.2ms\n",
- "Speed: 9.6ms preprocess, 375.2ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 370.9ms\n",
- "Speed: 11.6ms preprocess, 370.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 290.6ms\n",
- "Speed: 7.1ms preprocess, 290.6ms inference, 5.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 327.3ms\n",
- "Speed: 5.6ms preprocess, 327.3ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 300.3ms\n",
- "Speed: 13.6ms preprocess, 300.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 242.6ms\n",
- "Speed: 7.6ms preprocess, 242.6ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 301.8ms\n",
- "Speed: 4.0ms preprocess, 301.8ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 229.2ms\n",
- "Speed: 6.6ms preprocess, 229.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 237.2ms\n",
- "Speed: 4.0ms preprocess, 237.2ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 407.4ms\n",
- "Speed: 7.0ms preprocess, 407.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 321.6ms\n",
- "Speed: 5.6ms preprocess, 321.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 759.5ms\n",
- "Speed: 7.2ms preprocess, 759.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 409.7ms\n",
- "Speed: 6.1ms preprocess, 409.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 413.8ms\n",
- "Speed: 6.6ms preprocess, 413.8ms inference, 0.9ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 358.2ms\n",
- "Speed: 15.2ms preprocess, 358.2ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 540.0ms\n",
- "Speed: 8.7ms preprocess, 540.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 375.2ms\n",
- "Speed: 13.6ms preprocess, 375.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 378.5ms\n",
- "Speed: 5.6ms preprocess, 378.5ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 281.2ms\n",
- "Speed: 11.6ms preprocess, 281.2ms inference, 0.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 491.3ms\n",
- "Speed: 5.0ms preprocess, 491.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 361.3ms\n",
- "Speed: 7.2ms preprocess, 361.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 374.7ms\n",
- "Speed: 9.6ms preprocess, 374.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 399.8ms\n",
- "Speed: 11.7ms preprocess, 399.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 406.2ms\n",
- "Speed: 13.7ms preprocess, 406.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 498.5ms\n",
- "Speed: 9.6ms preprocess, 498.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 237.1ms\n",
- "Speed: 5.0ms preprocess, 237.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 242.3ms\n",
- "Speed: 4.0ms preprocess, 242.3ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 279.8ms\n",
- "Speed: 5.6ms preprocess, 279.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 218.3ms\n",
- "Speed: 4.0ms preprocess, 218.3ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 245.7ms\n",
- "Speed: 4.0ms preprocess, 245.7ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 228.4ms\n",
- "Speed: 7.6ms preprocess, 228.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 244.7ms\n",
- "Speed: 4.0ms preprocess, 244.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 208.1ms\n",
- "Speed: 4.0ms preprocess, 208.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 310.9ms\n",
- "Speed: 4.7ms preprocess, 310.9ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 317.8ms\n",
- "Speed: 9.7ms preprocess, 317.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 247.5ms\n",
- "Speed: 5.6ms preprocess, 247.5ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 288.5ms\n",
- "Speed: 9.7ms preprocess, 288.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 234.3ms\n",
- "Speed: 7.6ms preprocess, 234.3ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 218.8ms\n",
- "Speed: 3.6ms preprocess, 218.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 216.4ms\n",
- "Speed: 3.5ms preprocess, 216.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 300.7ms\n",
- "Speed: 4.0ms preprocess, 300.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 258.3ms\n",
- "Speed: 4.6ms preprocess, 258.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 205.7ms\n",
- "Speed: 3.9ms preprocess, 205.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 226.8ms\n",
- "Speed: 5.0ms preprocess, 226.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 227.9ms\n",
- "Speed: 5.0ms preprocess, 227.9ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 211.4ms\n",
- "Speed: 4.6ms preprocess, 211.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 215.5ms\n",
- "Speed: 4.0ms preprocess, 215.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 296.4ms\n",
- "Speed: 5.1ms preprocess, 296.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 324.2ms\n",
- "Speed: 4.0ms preprocess, 324.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 228.9ms\n",
- "Speed: 3.0ms preprocess, 228.9ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 216.9ms\n",
- "Speed: 5.6ms preprocess, 216.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 211.2ms\n",
- "Speed: 3.6ms preprocess, 211.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 210.9ms\n",
- "Speed: 5.6ms preprocess, 210.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 224.0ms\n",
- "Speed: 3.6ms preprocess, 224.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 268.0ms\n",
- "Speed: 4.7ms preprocess, 268.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 263.7ms\n",
- "Speed: 5.6ms preprocess, 263.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 346.7ms\n",
- "Speed: 9.6ms preprocess, 346.7ms inference, 3.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 292.3ms\n",
- "Speed: 5.0ms preprocess, 292.3ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 318.0ms\n",
- "Speed: 13.6ms preprocess, 318.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 224.0ms\n",
- "Speed: 5.6ms preprocess, 224.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 260.6ms\n",
- "Speed: 4.7ms preprocess, 260.6ms inference, 3.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 246.7ms\n",
- "Speed: 4.8ms preprocess, 246.7ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 219.7ms\n",
- "Speed: 3.0ms preprocess, 219.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 233.6ms\n",
- "Speed: 4.6ms preprocess, 233.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 244.9ms\n",
- "Speed: 5.6ms preprocess, 244.9ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 224.5ms\n",
- "Speed: 4.0ms preprocess, 224.5ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 10 Faces, 238.0ms\n",
- "Speed: 7.6ms preprocess, 238.0ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 7 Faces, 314.0ms\n",
- "Speed: 6.1ms preprocess, 314.0ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 252.2ms\n",
- "Speed: 5.6ms preprocess, 252.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 233.2ms\n",
- "Speed: 8.6ms preprocess, 233.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 304.6ms\n",
- "Speed: 4.6ms preprocess, 304.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 226.9ms\n",
- "Speed: 5.5ms preprocess, 226.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 244.5ms\n",
- "Speed: 4.0ms preprocess, 244.5ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 246.2ms\n",
- "Speed: 5.0ms preprocess, 246.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 228.4ms\n",
- "Speed: 4.5ms preprocess, 228.4ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 487.6ms\n",
- "Speed: 3.0ms preprocess, 487.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 379.6ms\n",
- "Speed: 11.6ms preprocess, 379.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 210.2ms\n",
- "Speed: 3.0ms preprocess, 210.2ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 252.0ms\n",
- "Speed: 4.6ms preprocess, 252.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 274.8ms\n",
- "Speed: 5.0ms preprocess, 274.8ms inference, 0.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 268.3ms\n",
- "Speed: 8.5ms preprocess, 268.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 346.4ms\n",
- "Speed: 7.6ms preprocess, 346.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 240.0ms\n",
- "Speed: 6.0ms preprocess, 240.0ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 257.1ms\n",
- "Speed: 4.6ms preprocess, 257.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 233.5ms\n",
- "Speed: 6.6ms preprocess, 233.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 214.6ms\n",
- "Speed: 5.0ms preprocess, 214.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 227.2ms\n",
- "Speed: 3.0ms preprocess, 227.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 237.5ms\n",
- "Speed: 3.0ms preprocess, 237.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 232.6ms\n",
- "Speed: 6.6ms preprocess, 232.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 226.2ms\n",
- "Speed: 4.6ms preprocess, 226.2ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 231.4ms\n",
- "Speed: 5.6ms preprocess, 231.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 233.0ms\n",
- "Speed: 4.6ms preprocess, 233.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 213.8ms\n",
- "Speed: 5.1ms preprocess, 213.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 241.9ms\n",
- "Speed: 6.6ms preprocess, 241.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 234.3ms\n",
- "Speed: 5.6ms preprocess, 234.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 241.7ms\n",
- "Speed: 4.0ms preprocess, 241.7ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 223.1ms\n",
- "Speed: 4.0ms preprocess, 223.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 228.2ms\n",
- "Speed: 4.0ms preprocess, 228.2ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 320.3ms\n",
- "Speed: 3.6ms preprocess, 320.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 242.4ms\n",
- "Speed: 5.6ms preprocess, 242.4ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 245.2ms\n",
- "Speed: 5.0ms preprocess, 245.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 279.7ms\n",
- "Speed: 5.9ms preprocess, 279.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 224.0ms\n",
- "Speed: 5.7ms preprocess, 224.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 259.2ms\n",
- "Speed: 6.6ms preprocess, 259.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 242.4ms\n",
- "Speed: 3.9ms preprocess, 242.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 245.5ms\n",
- "Speed: 4.0ms preprocess, 245.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 237.7ms\n",
- "Speed: 5.9ms preprocess, 237.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 209.7ms\n",
- "Speed: 4.6ms preprocess, 209.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 230.6ms\n",
- "Speed: 4.6ms preprocess, 230.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 298.0ms\n",
- "Speed: 5.0ms preprocess, 298.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 220.7ms\n",
- "Speed: 5.0ms preprocess, 220.7ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 227.3ms\n",
- "Speed: 5.4ms preprocess, 227.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 253.4ms\n",
- "Speed: 5.0ms preprocess, 253.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 229.0ms\n",
- "Speed: 5.0ms preprocess, 229.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 232.2ms\n",
- "Speed: 4.6ms preprocess, 232.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 227.2ms\n",
- "Speed: 4.6ms preprocess, 227.2ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 246.0ms\n",
- "Speed: 5.6ms preprocess, 246.0ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 228.9ms\n",
- "Speed: 4.6ms preprocess, 228.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 4745.1ms\n",
- "Speed: 4.6ms preprocess, 4745.1ms inference, 30.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 257.1ms\n",
- "Speed: 8.6ms preprocess, 257.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 312.0ms\n",
- "Speed: 4.6ms preprocess, 312.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 244.0ms\n",
- "Speed: 5.6ms preprocess, 244.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 266.0ms\n",
- "Speed: 3.6ms preprocess, 266.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 217.7ms\n",
- "Speed: 4.0ms preprocess, 217.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 237.6ms\n",
- "Speed: 4.5ms preprocess, 237.6ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 247.0ms\n",
- "Speed: 5.0ms preprocess, 247.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 324.8ms\n",
- "Speed: 6.6ms preprocess, 324.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 331.2ms\n",
- "Speed: 7.6ms preprocess, 331.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 258.1ms\n",
- "Speed: 4.0ms preprocess, 258.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 260.9ms\n",
- "Speed: 6.2ms preprocess, 260.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 333.8ms\n",
- "Speed: 13.1ms preprocess, 333.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 329.5ms\n",
- "Speed: 5.5ms preprocess, 329.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 16 Faces, 311.1ms\n",
- "Speed: 10.6ms preprocess, 311.1ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 16 Faces, 262.4ms\n",
- "Speed: 5.6ms preprocess, 262.4ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 16 Faces, 366.2ms\n",
- "Speed: 11.2ms preprocess, 366.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 16 Faces, 235.9ms\n",
- "Speed: 6.6ms preprocess, 235.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 16 Faces, 278.5ms\n",
- "Speed: 4.0ms preprocess, 278.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 280.1ms\n",
- "Speed: 6.6ms preprocess, 280.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 421.5ms\n",
- "Speed: 26.8ms preprocess, 421.5ms inference, 3.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 361.9ms\n",
- "Speed: 12.6ms preprocess, 361.9ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 214.4ms\n",
- "Speed: 5.6ms preprocess, 214.4ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 212.4ms\n",
- "Speed: 8.1ms preprocess, 212.4ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 234.8ms\n",
- "Speed: 5.2ms preprocess, 234.8ms inference, 1.8ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 453.7ms\n",
- "Speed: 5.0ms preprocess, 453.7ms inference, 3.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 288.0ms\n",
- "Speed: 7.0ms preprocess, 288.0ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 374.5ms\n",
- "Speed: 5.6ms preprocess, 374.5ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 513.7ms\n",
- "Speed: 55.2ms preprocess, 513.7ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 391.5ms\n",
- "Speed: 8.7ms preprocess, 391.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 379.9ms\n",
- "Speed: 10.6ms preprocess, 379.9ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 335.6ms\n",
- "Speed: 30.4ms preprocess, 335.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 530.7ms\n",
- "Speed: 6.0ms preprocess, 530.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 360.2ms\n",
- "Speed: 4.0ms preprocess, 360.2ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 570.8ms\n",
- "Speed: 5.0ms preprocess, 570.8ms inference, 3.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 365.6ms\n",
- "Speed: 8.6ms preprocess, 365.6ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 330.1ms\n",
- "Speed: 11.2ms preprocess, 330.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 370.1ms\n",
- "Speed: 4.6ms preprocess, 370.1ms inference, 10.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 770.1ms\n",
- "Speed: 9.6ms preprocess, 770.1ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 331.6ms\n",
- "Speed: 7.0ms preprocess, 331.6ms inference, 5.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 268.0ms\n",
- "Speed: 4.1ms preprocess, 268.0ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 409.7ms\n",
- "Speed: 13.7ms preprocess, 409.7ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 300.1ms\n",
- "Speed: 10.6ms preprocess, 300.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 314.1ms\n",
- "Speed: 5.0ms preprocess, 314.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 289.7ms\n",
- "Speed: 8.6ms preprocess, 289.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 270.4ms\n",
- "Speed: 9.6ms preprocess, 270.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 273.4ms\n",
- "Speed: 5.0ms preprocess, 273.4ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 291.3ms\n",
- "Speed: 9.6ms preprocess, 291.3ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 317.3ms\n",
- "Speed: 16.2ms preprocess, 317.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 334.4ms\n",
- "Speed: 6.6ms preprocess, 334.4ms inference, 5.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 244.3ms\n",
- "Speed: 6.6ms preprocess, 244.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 242.8ms\n",
- "Speed: 5.6ms preprocess, 242.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 258.7ms\n",
- "Speed: 6.6ms preprocess, 258.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 254.6ms\n",
- "Speed: 5.1ms preprocess, 254.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 485.0ms\n",
- "Speed: 8.6ms preprocess, 485.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 370.6ms\n",
- "Speed: 13.7ms preprocess, 370.6ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 302.4ms\n",
- "Speed: 8.6ms preprocess, 302.4ms inference, 4.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 306.0ms\n",
- "Speed: 5.6ms preprocess, 306.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 279.3ms\n",
- "Speed: 6.6ms preprocess, 279.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 247.3ms\n",
- "Speed: 4.0ms preprocess, 247.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 267.6ms\n",
- "Speed: 4.6ms preprocess, 267.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 273.2ms\n",
- "Speed: 4.0ms preprocess, 273.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 250.0ms\n",
- "Speed: 3.1ms preprocess, 250.0ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 248.4ms\n",
- "Speed: 3.5ms preprocess, 248.4ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 280.6ms\n",
- "Speed: 6.6ms preprocess, 280.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 259.9ms\n",
- "Speed: 4.5ms preprocess, 259.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 322.7ms\n",
- "Speed: 5.6ms preprocess, 322.7ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 265.9ms\n",
- "Speed: 6.5ms preprocess, 265.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 275.2ms\n",
- "Speed: 5.7ms preprocess, 275.2ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 252.3ms\n",
- "Speed: 5.6ms preprocess, 252.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 259.2ms\n",
- "Speed: 5.0ms preprocess, 259.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 245.2ms\n",
- "Speed: 7.6ms preprocess, 245.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 287.7ms\n",
- "Speed: 11.6ms preprocess, 287.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 244.3ms\n",
- "Speed: 4.6ms preprocess, 244.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 246.6ms\n",
- "Speed: 5.6ms preprocess, 246.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 250.7ms\n",
- "Speed: 4.0ms preprocess, 250.7ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 297.2ms\n",
- "Speed: 4.0ms preprocess, 297.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 255.3ms\n",
- "Speed: 4.6ms preprocess, 255.3ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 289.9ms\n",
- "Speed: 4.0ms preprocess, 289.9ms inference, 5.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 283.7ms\n",
- "Speed: 4.1ms preprocess, 283.7ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 264.2ms\n",
- "Speed: 5.8ms preprocess, 264.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 12 Faces, 533.1ms\n",
- "Speed: 10.7ms preprocess, 533.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 302.2ms\n",
- "Speed: 11.7ms preprocess, 302.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 328.2ms\n",
- "Speed: 5.0ms preprocess, 328.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 601.4ms\n",
- "Speed: 6.0ms preprocess, 601.4ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 693.9ms\n",
- "Speed: 13.7ms preprocess, 693.9ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 328.4ms\n",
- "Speed: 6.7ms preprocess, 328.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 299.0ms\n",
- "Speed: 7.0ms preprocess, 299.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 477.3ms\n",
- "Speed: 7.7ms preprocess, 477.3ms inference, 10.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 327.6ms\n",
- "Speed: 5.0ms preprocess, 327.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 377.0ms\n",
- "Speed: 9.6ms preprocess, 377.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 434.0ms\n",
- "Speed: 4.6ms preprocess, 434.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 383.7ms\n",
- "Speed: 29.4ms preprocess, 383.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 712.7ms\n",
- "Speed: 5.5ms preprocess, 712.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 446.9ms\n",
- "Speed: 14.6ms preprocess, 446.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 560.0ms\n",
- "Speed: 7.6ms preprocess, 560.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 404.9ms\n",
- "Speed: 5.6ms preprocess, 404.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 403.1ms\n",
- "Speed: 10.3ms preprocess, 403.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 293.3ms\n",
- "Speed: 3.1ms preprocess, 293.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 297.7ms\n",
- "Speed: 5.0ms preprocess, 297.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 263.0ms\n",
- "Speed: 4.0ms preprocess, 263.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 265.9ms\n",
- "Speed: 4.6ms preprocess, 265.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 261.0ms\n",
- "Speed: 4.5ms preprocess, 261.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n"
- ]
- },
- {
- "ename": "",
- "evalue": "",
- "output_type": "error",
- "traceback": [
- "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
- "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
- "\u001b[1;31mClick here for more info. \n",
- "\u001b[1;31mView Jupyter log for further details."
- ]
- }
- ],
- "source": [
- "device = torch.device(\"cpu\")\n",
- "\n",
- "# Cargamos el modelo\n",
- "model_path = '/results/weights/best.pt'\n",
- "model = YOLO(\"best2.pt\")\n",
- "model = model.to(device)\n",
- "\n",
- "# Inicializamos la captura de pantalla\n",
- "sct = mss()\n",
- "bounding_box = {\"top\": 0, \"left\": 0, \"width\": 800, \"height\": 600}\n",
- "\n",
- "while True:\n",
- " sct_img = sct.grab(bounding_box)\n",
- " frame = np.array(sct_img)\n",
- " frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
- "\n",
- " # Realizamos la deteccion\n",
- " results = model(frame)\n",
- "\n",
- " # Dibujamos los bounding boxes\n",
- " for result in results[0].boxes: # Accede a los resultados de las cajas\n",
- " x1, y1, x2, y2 = result.xyxy[0].cpu().numpy().astype(int)\n",
- " conf = result.conf[0].cpu().numpy()\n",
- " cls = int(result.cls[0].cpu().numpy())\n",
- " \n",
- " cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2) # Dibujar el rectángulo\n",
- " label = f'{model.names[cls]} {conf:.2f}' # Crear la etiqueta\n",
- " cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) # Añadir la etiqueta\n",
- "\n",
- "\n",
- " # Mostramos el resultado\n",
- " cv2.imshow(\"frame\", frame)\n",
- "\n",
- " if cv2.waitKey(1) & 0xFF == ord('q'):\n",
- " cv2.destroyAllWindows()\n",
- " break"
- ]
- },
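- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The loop above relies on an interactive `cv2.imshow` window, which can hang or crash the kernel when run inside Jupyter (see the error reported in the cell output). Below is a minimal sketch of a headless alternative, assuming the same `best2.pt` weights, an arbitrary 50-frame budget, and an assumed `frames/` output directory: it reuses the capture-and-detect loop, lets `results[0].plot()` draw the boxes, and writes the annotated frames to disk instead of opening a window."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Minimal sketch: run the screen-capture detection headlessly for a fixed\n",
- "# number of frames and save the annotated output instead of calling cv2.imshow.\n",
- "# Assumes the best2.pt weights from above and a frames/ output directory.\n",
- "import os\n",
- "\n",
- "import cv2\n",
- "import numpy as np\n",
- "from mss import mss\n",
- "from ultralytics import YOLO\n",
- "\n",
- "model = YOLO(\"best2.pt\")                 # same weights file as the cell above\n",
- "sct = mss()\n",
- "bounding_box = {\"top\": 0, \"left\": 0, \"width\": 800, \"height\": 600}\n",
- "\n",
- "os.makedirs(\"frames\", exist_ok=True)     # assumed output directory\n",
- "n_frames = 50                            # assumed frame budget for the demo\n",
- "\n",
- "for i in range(n_frames):\n",
- "    frame = np.array(sct.grab(bounding_box))          # BGRA screenshot\n",
- "    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)   # drop the alpha channel\n",
- "    results = model.predict(frame, verbose=False)     # silence the per-frame log\n",
- "    annotated = results[0].plot()                     # ultralytics draws the boxes (BGR)\n",
- "    cv2.imwrite(f\"frames/frame_{i:04d}.jpg\", annotated)"
- ]
- },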
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "0: 480x640 (no detections), 487.6ms\n",
- "Speed: 12.6ms preprocess, 487.6ms inference, 16.2ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 345.9ms\n",
- "Speed: 8.0ms preprocess, 345.9ms inference, 7.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 300.4ms\n",
- "Speed: 8.0ms preprocess, 300.4ms inference, 6.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 328.3ms\n",
- "Speed: 8.4ms preprocess, 328.3ms inference, 9.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 420.3ms\n",
- "Speed: 0.0ms preprocess, 420.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 400.9ms\n",
- "Speed: 8.0ms preprocess, 400.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 339.1ms\n",
- "Speed: 11.1ms preprocess, 339.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 278.2ms\n",
- "Speed: 0.0ms preprocess, 278.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 266.9ms\n",
- "Speed: 0.0ms preprocess, 266.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 329.0ms\n",
- "Speed: 7.1ms preprocess, 329.0ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 500.6ms\n",
- "Speed: 5.6ms preprocess, 500.6ms inference, 7.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 351.2ms\n",
- "Speed: 0.0ms preprocess, 351.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 268.7ms\n",
- "Speed: 0.0ms preprocess, 268.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 291.6ms\n",
- "Speed: 0.0ms preprocess, 291.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 361.1ms\n",
- "Speed: 7.5ms preprocess, 361.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 357.2ms\n",
- "Speed: 0.0ms preprocess, 357.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 284.3ms\n",
- "Speed: 0.0ms preprocess, 284.3ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 253.1ms\n",
- "Speed: 0.0ms preprocess, 253.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 333.8ms\n",
- "Speed: 0.0ms preprocess, 333.8ms inference, 2.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 350.5ms\n",
- "Speed: 7.6ms preprocess, 350.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 333.2ms\n",
- "Speed: 8.1ms preprocess, 333.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 269.4ms\n",
- "Speed: 0.0ms preprocess, 269.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 301.6ms\n",
- "Speed: 0.0ms preprocess, 301.6ms inference, 0.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 291.2ms\n",
- "Speed: 0.0ms preprocess, 291.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 289.7ms\n",
- "Speed: 11.8ms preprocess, 289.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 281.3ms\n",
- "Speed: 0.0ms preprocess, 281.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 274.5ms\n",
- "Speed: 10.4ms preprocess, 274.5ms inference, 9.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 277.7ms\n",
- "Speed: 8.1ms preprocess, 277.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 236.2ms\n",
- "Speed: 8.7ms preprocess, 236.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 208.4ms\n",
- "Speed: 0.0ms preprocess, 208.4ms inference, 8.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 235.3ms\n",
- "Speed: 4.8ms preprocess, 235.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 254.7ms\n",
- "Speed: 0.0ms preprocess, 254.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 276.2ms\n",
- "Speed: 7.6ms preprocess, 276.2ms inference, 7.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 274.3ms\n",
- "Speed: 0.0ms preprocess, 274.3ms inference, 8.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 272.0ms\n",
- "Speed: 8.0ms preprocess, 272.0ms inference, 0.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 247.6ms\n",
- "Speed: 6.0ms preprocess, 247.6ms inference, 8.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 257.9ms\n",
- "Speed: 0.0ms preprocess, 257.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 203.1ms\n",
- "Speed: 0.0ms preprocess, 203.1ms inference, 1.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 237.3ms\n",
- "Speed: 0.0ms preprocess, 237.3ms inference, 8.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 275.0ms\n",
- "Speed: 0.0ms preprocess, 275.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 271.3ms\n",
- "Speed: 0.0ms preprocess, 271.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 264.5ms\n",
- "Speed: 10.2ms preprocess, 264.5ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 269.5ms\n",
- "Speed: 0.0ms preprocess, 269.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 290.2ms\n",
- "Speed: 0.0ms preprocess, 290.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 280.0ms\n",
- "Speed: 0.0ms preprocess, 280.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 203.2ms\n",
- "Speed: 0.0ms preprocess, 203.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 209.3ms\n",
- "Speed: 0.0ms preprocess, 209.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 269.3ms\n",
- "Speed: 9.5ms preprocess, 269.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 267.7ms\n",
- "Speed: 0.0ms preprocess, 267.7ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 284.4ms\n",
- "Speed: 8.0ms preprocess, 284.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 258.6ms\n",
- "Speed: 0.0ms preprocess, 258.6ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 218.6ms\n",
- "Speed: 7.5ms preprocess, 218.6ms inference, 7.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 215.8ms\n",
- "Speed: 0.0ms preprocess, 215.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 243.6ms\n",
- "Speed: 0.0ms preprocess, 243.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 245.0ms\n",
- "Speed: 6.3ms preprocess, 245.0ms inference, 10.9ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 269.6ms\n",
- "Speed: 0.0ms preprocess, 269.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 267.0ms\n",
- "Speed: 0.0ms preprocess, 267.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 264.2ms\n",
- "Speed: 0.0ms preprocess, 264.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 226.4ms\n",
- "Speed: 0.0ms preprocess, 226.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 244.1ms\n",
- "Speed: 0.0ms preprocess, 244.1ms inference, 3.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 249.9ms\n",
- "Speed: 8.0ms preprocess, 249.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 260.2ms\n",
- "Speed: 0.0ms preprocess, 260.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 279.6ms\n",
- "Speed: 0.0ms preprocess, 279.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 251.2ms\n",
- "Speed: 0.0ms preprocess, 251.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 209.5ms\n",
- "Speed: 5.3ms preprocess, 209.5ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 264.1ms\n",
- "Speed: 0.0ms preprocess, 264.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 256.1ms\n",
- "Speed: 7.1ms preprocess, 256.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 255.6ms\n",
- "Speed: 5.6ms preprocess, 255.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 242.6ms\n",
- "Speed: 0.0ms preprocess, 242.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 262.6ms\n",
- "Speed: 0.0ms preprocess, 262.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 274.6ms\n",
- "Speed: 0.0ms preprocess, 274.6ms inference, 4.9ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 249.1ms\n",
- "Speed: 0.0ms preprocess, 249.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 234.7ms\n",
- "Speed: 0.0ms preprocess, 234.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 265.2ms\n",
- "Speed: 0.0ms preprocess, 265.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 269.8ms\n",
- "Speed: 0.0ms preprocess, 269.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 226.8ms\n",
- "Speed: 0.0ms preprocess, 226.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 274.6ms\n",
- "Speed: 0.0ms preprocess, 274.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 272.0ms\n",
- "Speed: 8.0ms preprocess, 272.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 235.6ms\n",
- "Speed: 0.0ms preprocess, 235.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 297.1ms\n",
- "Speed: 8.0ms preprocess, 297.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 205.7ms\n",
- "Speed: 8.2ms preprocess, 205.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 248.6ms\n",
- "Speed: 0.0ms preprocess, 248.6ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 275.0ms\n",
- "Speed: 0.0ms preprocess, 275.0ms inference, 4.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 260.4ms\n",
- "Speed: 8.1ms preprocess, 260.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 233.5ms\n",
- "Speed: 0.0ms preprocess, 233.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 243.6ms\n",
- "Speed: 0.0ms preprocess, 243.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 243.7ms\n",
- "Speed: 5.0ms preprocess, 243.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 259.9ms\n",
- "Speed: 0.0ms preprocess, 259.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 248.8ms\n",
- "Speed: 0.0ms preprocess, 248.8ms inference, 8.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 245.4ms\n",
- "Speed: 0.0ms preprocess, 245.4ms inference, 7.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 265.5ms\n",
- "Speed: 8.5ms preprocess, 265.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 238.0ms\n",
- "Speed: 0.0ms preprocess, 238.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 244.6ms\n",
- "Speed: 0.0ms preprocess, 244.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 253.2ms\n",
- "Speed: 0.0ms preprocess, 253.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 270.6ms\n",
- "Speed: 0.0ms preprocess, 270.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 232.8ms\n",
- "Speed: 0.0ms preprocess, 232.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 240.1ms\n",
- "Speed: 8.2ms preprocess, 240.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 244.4ms\n",
- "Speed: 6.6ms preprocess, 244.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 231.8ms\n",
- "Speed: 4.6ms preprocess, 231.8ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 255.5ms\n",
- "Speed: 0.0ms preprocess, 255.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 255.7ms\n",
- "Speed: 8.1ms preprocess, 255.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 227.7ms\n",
- "Speed: 0.0ms preprocess, 227.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 343.3ms\n",
- "Speed: 0.0ms preprocess, 343.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 258.4ms\n",
- "Speed: 4.6ms preprocess, 258.4ms inference, 8.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 302.9ms\n",
- "Speed: 0.0ms preprocess, 302.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 261.7ms\n",
- "Speed: 8.0ms preprocess, 261.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 231.1ms\n",
- "Speed: 0.0ms preprocess, 231.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 271.6ms\n",
- "Speed: 8.2ms preprocess, 271.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 292.9ms\n",
- "Speed: 0.0ms preprocess, 292.9ms inference, 9.9ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 237.6ms\n",
- "Speed: 9.0ms preprocess, 237.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 212.1ms\n",
- "Speed: 0.0ms preprocess, 212.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 247.3ms\n",
- "Speed: 0.0ms preprocess, 247.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 266.0ms\n",
- "Speed: 8.1ms preprocess, 266.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 250.5ms\n",
- "Speed: 8.2ms preprocess, 250.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 292.8ms\n",
- "Speed: 0.0ms preprocess, 292.8ms inference, 5.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 271.1ms\n",
- "Speed: 0.0ms preprocess, 271.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 233.6ms\n",
- "Speed: 0.0ms preprocess, 233.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 208.4ms\n",
- "Speed: 0.0ms preprocess, 208.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 268.9ms\n",
- "Speed: 0.0ms preprocess, 268.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 256.8ms\n",
- "Speed: 4.6ms preprocess, 256.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 256.1ms\n",
- "Speed: 8.8ms preprocess, 256.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 249.6ms\n",
- "Speed: 0.0ms preprocess, 249.6ms inference, 5.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 247.1ms\n",
- "Speed: 8.0ms preprocess, 247.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 211.3ms\n",
- "Speed: 0.0ms preprocess, 211.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 248.9ms\n",
- "Speed: 0.0ms preprocess, 248.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 240.5ms\n",
- "Speed: 8.0ms preprocess, 240.5ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 247.7ms\n",
- "Speed: 8.1ms preprocess, 247.7ms inference, 3.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 258.9ms\n",
- "Speed: 5.0ms preprocess, 258.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 256.9ms\n",
- "Speed: 8.1ms preprocess, 256.9ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 213.2ms\n",
- "Speed: 0.0ms preprocess, 213.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 239.7ms\n",
- "Speed: 0.0ms preprocess, 239.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 251.1ms\n",
- "Speed: 0.0ms preprocess, 251.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 274.5ms\n",
- "Speed: 0.0ms preprocess, 274.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 276.6ms\n",
- "Speed: 0.0ms preprocess, 276.6ms inference, 4.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 246.1ms\n",
- "Speed: 7.5ms preprocess, 246.1ms inference, 8.2ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 246.3ms\n",
- "Speed: 0.0ms preprocess, 246.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 222.6ms\n",
- "Speed: 0.0ms preprocess, 222.6ms inference, 1.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 249.6ms\n",
- "Speed: 0.0ms preprocess, 249.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 261.9ms\n",
- "Speed: 8.0ms preprocess, 261.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 260.5ms\n",
- "Speed: 0.0ms preprocess, 260.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 216.5ms\n",
- "Speed: 8.0ms preprocess, 216.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 236.7ms\n",
- "Speed: 0.0ms preprocess, 236.7ms inference, 0.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 273.6ms\n",
- "Speed: 0.0ms preprocess, 273.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 254.9ms\n",
- "Speed: 0.0ms preprocess, 254.9ms inference, 1.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 253.5ms\n",
- "Speed: 0.0ms preprocess, 253.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 250.1ms\n",
- "Speed: 0.0ms preprocess, 250.1ms inference, 1.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 246.3ms\n",
- "Speed: 0.0ms preprocess, 246.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 258.1ms\n",
- "Speed: 9.5ms preprocess, 258.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 273.3ms\n",
- "Speed: 0.0ms preprocess, 273.3ms inference, 0.9ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 271.4ms\n",
- "Speed: 0.0ms preprocess, 271.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 260.3ms\n",
- "Speed: 0.0ms preprocess, 260.3ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 245.5ms\n",
- "Speed: 0.0ms preprocess, 245.5ms inference, 10.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 286.6ms\n",
- "Speed: 0.0ms preprocess, 286.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 270.7ms\n",
- "Speed: 8.0ms preprocess, 270.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 236.0ms\n",
- "Speed: 0.0ms preprocess, 236.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 284.3ms\n",
- "Speed: 8.1ms preprocess, 284.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 214.3ms\n",
- "Speed: 0.0ms preprocess, 214.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 244.8ms\n",
- "Speed: 0.0ms preprocess, 244.8ms inference, 7.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 256.5ms\n",
- "Speed: 0.0ms preprocess, 256.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 230.5ms\n",
- "Speed: 0.0ms preprocess, 230.5ms inference, 7.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 237.0ms\n",
- "Speed: 0.0ms preprocess, 237.0ms inference, 3.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 249.0ms\n",
- "Speed: 0.0ms preprocess, 249.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 247.9ms\n",
- "Speed: 8.0ms preprocess, 247.9ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 223.2ms\n",
- "Speed: 0.0ms preprocess, 223.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 259.1ms\n",
- "Speed: 1.1ms preprocess, 259.1ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 232.8ms\n",
- "Speed: 0.0ms preprocess, 232.8ms inference, 7.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 240.7ms\n",
- "Speed: 0.0ms preprocess, 240.7ms inference, 8.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 251.6ms\n",
- "Speed: 8.4ms preprocess, 251.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 248.8ms\n",
- "Speed: 0.0ms preprocess, 248.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 254.4ms\n",
- "Speed: 6.2ms preprocess, 254.4ms inference, 0.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 255.8ms\n",
- "Speed: 0.0ms preprocess, 255.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 217.0ms\n",
- "Speed: 1.2ms preprocess, 217.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 270.2ms\n",
- "Speed: 6.2ms preprocess, 270.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 222.2ms\n",
- "Speed: 0.0ms preprocess, 222.2ms inference, 6.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 273.6ms\n",
- "Speed: 0.0ms preprocess, 273.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 222.3ms\n",
- "Speed: 0.0ms preprocess, 222.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 254.4ms\n",
- "Speed: 0.0ms preprocess, 254.4ms inference, 8.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 303.2ms\n",
- "Speed: 8.0ms preprocess, 303.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 219.7ms\n",
- "Speed: 0.0ms preprocess, 219.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 256.9ms\n",
- "Speed: 0.0ms preprocess, 256.9ms inference, 2.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 257.3ms\n",
- "Speed: 5.9ms preprocess, 257.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 252.3ms\n",
- "Speed: 0.0ms preprocess, 252.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 3 Faces, 257.6ms\n",
- "Speed: 8.0ms preprocess, 257.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 247.7ms\n",
- "Speed: 0.0ms preprocess, 247.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 217.9ms\n",
- "Speed: 0.0ms preprocess, 217.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 253.5ms\n",
- "Speed: 0.0ms preprocess, 253.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 265.0ms\n",
- "Speed: 8.0ms preprocess, 265.0ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 261.2ms\n",
- "Speed: 5.2ms preprocess, 261.2ms inference, 6.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 251.2ms\n",
- "Speed: 0.0ms preprocess, 251.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 269.4ms\n",
- "Speed: 11.7ms preprocess, 269.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 262.5ms\n",
- "Speed: 8.5ms preprocess, 262.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 214.7ms\n",
- "Speed: 0.0ms preprocess, 214.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 254.2ms\n",
- "Speed: 0.0ms preprocess, 254.2ms inference, 8.4ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 242.2ms\n",
- "Speed: 8.1ms preprocess, 242.2ms inference, 8.2ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 259.7ms\n",
- "Speed: 0.0ms preprocess, 259.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 255.7ms\n",
- "Speed: 2.3ms preprocess, 255.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 225.6ms\n",
- "Speed: 0.0ms preprocess, 225.6ms inference, 6.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 256.2ms\n",
- "Speed: 6.4ms preprocess, 256.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 256.1ms\n",
- "Speed: 0.0ms preprocess, 256.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 260.1ms\n",
- "Speed: 0.0ms preprocess, 260.1ms inference, 1.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 239.1ms\n",
- "Speed: 8.0ms preprocess, 239.1ms inference, 5.1ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 271.6ms\n",
- "Speed: 8.0ms preprocess, 271.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 247.1ms\n",
- "Speed: 8.0ms preprocess, 247.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 255.4ms\n",
- "Speed: 0.0ms preprocess, 255.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 254.1ms\n",
- "Speed: 0.0ms preprocess, 254.1ms inference, 4.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 250.1ms\n",
- "Speed: 0.0ms preprocess, 250.1ms inference, 8.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 246.7ms\n",
- "Speed: 0.0ms preprocess, 246.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 208.0ms\n",
- "Speed: 0.0ms preprocess, 208.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 248.9ms\n",
- "Speed: 8.1ms preprocess, 248.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 263.6ms\n",
- "Speed: 0.0ms preprocess, 263.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 256.7ms\n",
- "Speed: 0.0ms preprocess, 256.7ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 232.9ms\n",
- "Speed: 0.0ms preprocess, 232.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 241.6ms\n",
- "Speed: 0.0ms preprocess, 241.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 257.7ms\n",
- "Speed: 5.1ms preprocess, 257.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 264.5ms\n",
- "Speed: 0.0ms preprocess, 264.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 293.8ms\n",
- "Speed: 0.0ms preprocess, 293.8ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 280.2ms\n",
- "Speed: 8.0ms preprocess, 280.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 269.6ms\n",
- "Speed: 0.0ms preprocess, 269.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 215.3ms\n",
- "Speed: 0.0ms preprocess, 215.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 260.2ms\n",
- "Speed: 1.6ms preprocess, 260.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 281.8ms\n",
- "Speed: 0.0ms preprocess, 281.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 272.7ms\n",
- "Speed: 8.0ms preprocess, 272.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 280.8ms\n",
- "Speed: 0.0ms preprocess, 280.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 220.0ms\n",
- "Speed: 0.0ms preprocess, 220.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 217.9ms\n",
- "Speed: 0.0ms preprocess, 217.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 247.8ms\n",
- "Speed: 0.0ms preprocess, 247.8ms inference, 1.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 264.5ms\n",
- "Speed: 0.0ms preprocess, 264.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 256.1ms\n",
- "Speed: 9.2ms preprocess, 256.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 258.5ms\n",
- "Speed: 8.0ms preprocess, 258.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 268.0ms\n",
- "Speed: 9.0ms preprocess, 268.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 232.3ms\n",
- "Speed: 5.1ms preprocess, 232.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 214.7ms\n",
- "Speed: 0.0ms preprocess, 214.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 268.5ms\n",
- "Speed: 8.1ms preprocess, 268.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 265.4ms\n",
- "Speed: 0.0ms preprocess, 265.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 280.1ms\n",
- "Speed: 7.7ms preprocess, 280.1ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 270.4ms\n",
- "Speed: 0.0ms preprocess, 270.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 271.7ms\n",
- "Speed: 7.6ms preprocess, 271.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 217.4ms\n",
- "Speed: 0.0ms preprocess, 217.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 231.9ms\n",
- "Speed: 0.0ms preprocess, 231.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 279.9ms\n",
- "Speed: 0.0ms preprocess, 279.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 271.8ms\n",
- "Speed: 7.6ms preprocess, 271.8ms inference, 1.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 274.7ms\n",
- "Speed: 0.0ms preprocess, 274.7ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 264.6ms\n",
- "Speed: 0.0ms preprocess, 264.6ms inference, 1.3ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 254.9ms\n",
- "Speed: 8.0ms preprocess, 254.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 207.8ms\n",
- "Speed: 2.2ms preprocess, 207.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 (no detections), 253.3ms\n",
- "Speed: 0.0ms preprocess, 253.3ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 262.0ms\n",
- "Speed: 8.1ms preprocess, 262.0ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 287.2ms\n",
- "Speed: 0.0ms preprocess, 287.2ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 264.4ms\n",
- "Speed: 0.0ms preprocess, 264.4ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 253.2ms\n",
- "Speed: 0.0ms preprocess, 253.2ms inference, 7.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 221.6ms\n",
- "Speed: 0.0ms preprocess, 221.6ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 268.5ms\n",
- "Speed: 0.0ms preprocess, 268.5ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 270.8ms\n",
- "Speed: 0.0ms preprocess, 270.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 343.6ms\n",
- "Speed: 8.6ms preprocess, 343.6ms inference, 4.2ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 212.5ms\n",
- "Speed: 0.0ms preprocess, 212.5ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 207.9ms\n",
- "Speed: 0.0ms preprocess, 207.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 296.7ms\n",
- "Speed: 0.0ms preprocess, 296.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 301.8ms\n",
- "Speed: 8.0ms preprocess, 301.8ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 291.3ms\n",
- "Speed: 1.2ms preprocess, 291.3ms inference, 3.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 239.9ms\n",
- "Speed: 8.0ms preprocess, 239.9ms inference, 9.2ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 258.1ms\n",
- "Speed: 0.0ms preprocess, 258.1ms inference, 6.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 2 Faces, 275.9ms\n",
- "Speed: 0.0ms preprocess, 275.9ms inference, 0.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 264.9ms\n",
- "Speed: 0.0ms preprocess, 264.9ms inference, 1.7ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n",
- "0: 480x640 1 Face, 232.1ms\n",
- "Speed: 0.0ms preprocess, 232.1ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n"
- ]
- }
- ],
- "source": [
- "device = torch.device(\"cpu\")\n",
- "\n",
- "# Cargamos el modelo\n",
- "model_path = '/results/weights/best.pt'\n",
- "model = YOLO(\"best2.pt\")\n",
- "model = model.to(device)\n",
- "\n",
- "\n",
- "# Ahora con la camara\n",
- "cap = cv2.VideoCapture(0)\n",
- "\n",
- "while True:\n",
- " ret, frame = cap.read()\n",
- "\n",
- " # Realizamos la deteccion\n",
- " results = model(frame)\n",
- "\n",
- " # Dibujamos los bounding boxes\n",
- " for result in results[0].boxes: # Accede a los resultados de las cajas\n",
- " x1, y1, x2, y2 = result.xyxy[0].cpu().numpy().astype(int)\n",
- " conf = result.conf[0].cpu().numpy()\n",
- " cls = int(result.cls[0].cpu().numpy())\n",
- " \n",
- " cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2) # Dibujar el rectángulo\n",
- " label = f'{model.names[cls]} {conf:.2f}' # Crear la etiqueta\n",
- " cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) # Añadir la etiqueta\n",
- "\n",
- " # Mostramos el resultado\n",
- " cv2.imshow(\"frame\", frame)\n",
- "\n",
- " if cv2.waitKey(1) & 0xFF == ord('q'):\n",
- " cv2.destroyAllWindows()\n",
- " cap.release()\n",
- " break\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Test del modelo\n",
- "\n",
- "Se usara el modelo entrenado 'best.pt' para evaluar el modelo caprutando la pantalla en tiempo real"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Cara mas cercana"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finalmente, nos interesa quedarnos con la cara mas cercana, el criterio para determinar aquello será el Bounding Box mas grande."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "0: 480x640 3 Faces, 279.7ms\n",
- "Speed: 8.0ms preprocess, 279.7ms inference, 14.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 512.0, 191.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 227.7ms\n",
- "Speed: 6.0ms preprocess, 227.7ms inference, 8.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 344.0, 172.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 232.1ms\n",
- "Speed: 4.0ms preprocess, 232.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 337.0, 187.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 317.2ms\n",
- "Speed: 3.1ms preprocess, 317.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 350.5, 191.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 219.1ms\n",
- "Speed: 6.0ms preprocess, 219.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 347.5, 190.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 223.7ms\n",
- "Speed: 4.0ms preprocess, 223.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 334.0, 175.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 301.7ms\n",
- "Speed: 5.0ms preprocess, 301.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 304.5, 175.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 316.0ms\n",
- "Speed: 3.0ms preprocess, 316.0ms inference, 6.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 279.0, 173.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 274.1ms\n",
- "Speed: 4.0ms preprocess, 274.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 262.5, 176.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 251.6ms\n",
- "Speed: 4.0ms preprocess, 251.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 261.5, 179.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 269.8ms\n",
- "Speed: 3.0ms preprocess, 269.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 263.0, 181.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 241.9ms\n",
- "Speed: 4.0ms preprocess, 241.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 265.0, 183.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "0: 480x640 2 Faces, 217.7ms\n",
- "Speed: 4.0ms preprocess, 217.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 263.5, 183.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 197.2ms\n",
- "Speed: 4.0ms preprocess, 197.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 262.0, 181.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 217.5ms\n",
- "Speed: 4.0ms preprocess, 217.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 250.5, 155.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 215.7ms\n",
- "Speed: 3.0ms preprocess, 215.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 254.5, 135.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 222.1ms\n",
- "Speed: 3.0ms preprocess, 222.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 260.5, 162.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 228.9ms\n",
- "Speed: 5.0ms preprocess, 228.9ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 499.5, 196.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 207.2ms\n",
- "Speed: 4.0ms preprocess, 207.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 499.5, 198.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 254.8ms\n",
- "Speed: 4.0ms preprocess, 254.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 494.0, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 210.2ms\n",
- "Speed: 5.0ms preprocess, 210.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 493.5, 199.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 234.7ms\n",
- "Speed: 5.1ms preprocess, 234.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 497.0, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 232.2ms\n",
- "Speed: 5.0ms preprocess, 232.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 496.0, 199.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 214.5ms\n",
- "Speed: 4.0ms preprocess, 214.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 496.5, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 206.1ms\n",
- "Speed: 3.0ms preprocess, 206.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 495.0, 200.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 212.5ms\n",
- "Speed: 2.1ms preprocess, 212.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 251.0, 102.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 204.1ms\n",
- "Speed: 4.0ms preprocess, 204.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 262.0, 88.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 264.7ms\n",
- "Speed: 4.1ms preprocess, 264.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 276.5, 78.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 218.1ms\n",
- "Speed: 3.0ms preprocess, 218.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 287.0, 73.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 209.6ms\n",
- "Speed: 3.0ms preprocess, 209.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 285.5, 71.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 220.1ms\n",
- "Speed: 4.0ms preprocess, 220.1ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 285.0, 74.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 214.1ms\n",
- "Speed: 3.0ms preprocess, 214.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 280.0, 81.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 210.1ms\n",
- "Speed: 2.0ms preprocess, 210.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 271.0, 80.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 222.1ms\n",
- "Speed: 4.0ms preprocess, 222.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 510.0, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 212.7ms\n",
- "Speed: 4.0ms preprocess, 212.7ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 504.0, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 266.1ms\n",
- "Speed: 4.0ms preprocess, 266.1ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 502.0, 198.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 210.7ms\n",
- "Speed: 5.0ms preprocess, 210.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 506.5, 198.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 218.1ms\n",
- "Speed: 3.0ms preprocess, 218.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 498.5, 196.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 213.8ms\n",
- "Speed: 3.1ms preprocess, 213.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 502.0, 196.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 207.1ms\n",
- "Speed: 4.0ms preprocess, 207.1ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 501.5, 197.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 224.2ms\n",
- "Speed: 3.0ms preprocess, 224.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 501.0, 197.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 289.7ms\n",
- "Speed: 3.0ms preprocess, 289.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 499.0, 197.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 277.8ms\n",
- "Speed: 4.0ms preprocess, 277.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 497.5, 198.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 241.2ms\n",
- "Speed: 4.0ms preprocess, 241.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 502.5, 199.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 226.7ms\n",
- "Speed: 4.0ms preprocess, 226.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 500.5, 201.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 216.7ms\n",
- "Speed: 4.0ms preprocess, 216.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 498.5, 201.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 206.1ms\n",
- "Speed: 2.1ms preprocess, 206.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 499.5, 201.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 206.1ms\n",
- "Speed: 4.0ms preprocess, 206.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 504.0, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 204.1ms\n",
- "Speed: 3.0ms preprocess, 204.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 499.0, 201.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 200.1ms\n",
- "Speed: 3.0ms preprocess, 200.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 500.0, 201.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 212.2ms\n",
- "Speed: 4.0ms preprocess, 212.2ms inference, 2.5ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 501.5, 199.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 210.7ms\n",
- "Speed: 4.0ms preprocess, 210.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 503.5, 200.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 265.7ms\n",
- "Speed: 4.0ms preprocess, 265.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 505.0, 200.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 207.2ms\n",
- "Speed: 4.0ms preprocess, 207.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 500.0, 201.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 212.6ms\n",
- "Speed: 3.0ms preprocess, 212.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 227.5, 110.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 207.6ms\n",
- "Speed: 2.0ms preprocess, 207.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 220.0, 84.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 211.1ms\n",
- "Speed: 4.0ms preprocess, 211.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 224.0, 74.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 215.4ms\n",
- "Speed: 4.0ms preprocess, 215.4ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 227.5, 72.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 207.0ms\n",
- "Speed: 3.0ms preprocess, 207.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 230.5, 75.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 222.0ms\n",
- "Speed: 4.1ms preprocess, 222.0ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 234.0, 77.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 206.2ms\n",
- "Speed: 4.0ms preprocess, 206.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 234.5, 79.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 263.1ms\n",
- "Speed: 4.0ms preprocess, 263.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 234.0, 81.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 203.2ms\n",
- "Speed: 3.6ms preprocess, 203.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 252.5, 103.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 210.0ms\n",
- "Speed: 4.1ms preprocess, 210.0ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 254.5, 111.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 203.6ms\n",
- "Speed: 5.0ms preprocess, 203.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 250.5, 116.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 210.6ms\n",
- "Speed: 5.0ms preprocess, 210.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 250.0, 118.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 224.1ms\n",
- "Speed: 4.0ms preprocess, 224.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 244.5, 122.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 1 Face, 215.2ms\n",
- "Speed: 4.0ms preprocess, 215.2ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 242.0, 118.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 207.3ms\n",
- "Speed: 4.0ms preprocess, 207.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 232.0, 104.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 212.1ms\n",
- "Speed: 5.0ms preprocess, 212.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 221.0, 109.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 257.7ms\n",
- "Speed: 4.1ms preprocess, 257.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 200.5, 113.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 219.5ms\n",
- "Speed: 4.0ms preprocess, 219.5ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 512.0, 188.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 208.0ms\n",
- "Speed: 4.1ms preprocess, 208.0ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 516.0, 183.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 233.2ms\n",
- "Speed: 4.0ms preprocess, 233.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 335.0, 182.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 209.0ms\n",
- "Speed: 3.1ms preprocess, 209.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 552.5, 180.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 222.2ms\n",
- "Speed: 4.0ms preprocess, 222.2ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 568.5, 182.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 199.1ms\n",
- "Speed: 5.0ms preprocess, 199.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 573.5, 180.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 217.1ms\n",
- "Speed: 3.0ms preprocess, 217.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 571.5, 177.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 262.7ms\n",
- "Speed: 4.0ms preprocess, 262.7ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 338.0, 187.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 203.3ms\n",
- "Speed: 3.0ms preprocess, 203.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 542.0, 175.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 204.3ms\n",
- "Speed: 3.0ms preprocess, 204.3ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 339.5, 187.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 218.6ms\n",
- "Speed: 3.0ms preprocess, 218.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 338.0, 186.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 209.1ms\n",
- "Speed: 4.0ms preprocess, 209.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 336.5, 186.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 216.1ms\n",
- "Speed: 3.0ms preprocess, 216.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 548.0, 178.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 211.0ms\n",
- "Speed: 3.1ms preprocess, 211.0ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 336.5, 182.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 203.1ms\n",
- "Speed: 3.0ms preprocess, 203.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 547.5, 178.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 266.8ms\n",
- "Speed: 3.0ms preprocess, 266.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 549.5, 178.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 219.2ms\n",
- "Speed: 3.0ms preprocess, 219.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 337.0, 175.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 214.1ms\n",
- "Speed: 3.0ms preprocess, 214.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 549.0, 178.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 215.2ms\n",
- "Speed: 5.0ms preprocess, 215.2ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 549.5, 180.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 206.6ms\n",
- "Speed: 3.0ms preprocess, 206.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 338.5, 169.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 203.7ms\n",
- "Speed: 3.0ms preprocess, 203.7ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 544.0, 180.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 208.2ms\n",
- "Speed: 4.0ms preprocess, 208.2ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 547.5, 179.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 224.8ms\n",
- "Speed: 4.0ms preprocess, 224.8ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 334.5, 166.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 257.1ms\n",
- "Speed: 4.0ms preprocess, 257.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 545.0, 181.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 202.6ms\n",
- "Speed: 4.0ms preprocess, 202.6ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 153.0, 63.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 204.2ms\n",
- "Speed: 3.0ms preprocess, 204.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 550.0, 182.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 202.1ms\n",
- "Speed: 3.1ms preprocess, 202.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 547.0, 181.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 215.1ms\n",
- "Speed: 4.0ms preprocess, 215.1ms inference, 2.6ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 335.5, 157.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 204.1ms\n",
- "Speed: 4.0ms preprocess, 204.1ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 334.5, 154.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 217.3ms\n",
- "Speed: 3.0ms preprocess, 217.3ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 334.5, 153.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 230.6ms\n",
- "Speed: 4.0ms preprocess, 230.6ms inference, 1.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 162.5, 62.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 267.7ms\n",
- "Speed: 3.0ms preprocess, 267.7ms inference, 3.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 338.0, 147.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 253.2ms\n",
- "Speed: 5.0ms preprocess, 253.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 150.0, 121.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 223.1ms\n",
- "Speed: 4.0ms preprocess, 223.1ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 156.5, 145.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 2 Faces, 202.2ms\n",
- "Speed: 4.0ms preprocess, 202.2ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 167.5, 146.5\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 3 Faces, 215.6ms\n",
- "Speed: 3.0ms preprocess, 215.6ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n",
- "\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 187.5, 145.0\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "0: 480x640 4 Faces, 214.9ms\n",
- "Speed: 4.1ms preprocess, 214.9ms inference, 2.0ms postprocess per image at shape (1, 3, 480, 640)\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Centro: 219.0, 145.5\n"
- ]
- }
- ],
- "source": [
- "device = torch.device(\"cpu\")\n",
- "\n",
- "# Cargamos el modelo\n",
- "model_path = '/results/weights/best.pt'\n",
- "model = YOLO(\"best2.pt\")\n",
- "model = model.to(device)\n",
- "\n",
- "# Bucasmos el BB más grande dentro de la imagen, primero probaremos con la imagen de la cámara\n",
- "cap = cv2.VideoCapture(0)\n",
- "\n",
- "while True:\n",
- " ret, frame = cap.read()\n",
- "\n",
- " # Realizamos la deteccion\n",
- " results = model(frame)\n",
- "\n",
- " # Encontramos el bounding box mas grande\n",
- " max_area = 0\n",
- " max_bb = None\n",
- " for result in results[0].boxes: # Accede a los resultados de las cajas\n",
- " x1, y1, x2, y2 = result.xyxy[0].cpu().numpy().astype(int)\n",
- " area = (x2 - x1) * (y2 - y1)\n",
- " if area > max_area:\n",
- " max_area = area\n",
- " max_bb = result\n",
- "\n",
- " if max_bb is not None:\n",
- " x1, y1, x2, y2 = max_bb.xyxy[0].cpu().numpy().astype(int)\n",
- " cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)\n",
- " label = f'{model.names[int(max_bb.cls[0].cpu().numpy())]} {max_bb.conf[0].cpu().numpy():.2f}'\n",
- " cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n",
- " print(f'Centro: {(x1 + x2) / 2}, {(y1 + y2) / 2}')\n",
- "\n",
- " # Mostramos el resultado\n",
- " cv2.imshow(\"frame\", frame)\n",
- "\n",
- " if cv2.waitKey(1) & 0xFF == ord('q'):\n",
- " cv2.destroyAllWindows()\n",
- " cap.release()\n",
- " break\n"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.8"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/_/best2.pt b/_/best2.pt
deleted file mode 100644
index 7052eb0..0000000
Binary files a/_/best2.pt and /dev/null differ
diff --git a/labels/face.py b/labels/face.py
index 8b3eed5..70046b3 100644
--- a/labels/face.py
+++ b/labels/face.py
@@ -135,7 +135,7 @@ def step_face(self):
int(self.pose.y() + (self.target.y() - self.pose.y()) * 0.3)
)
- def face_detectio_target(self, x, y):
+ def face_tracking_target(self, x, y):
self.target = QPoint(
int(self.width() * x * 0.6 + self.width() * 0.15),
int(self.height() * y * 0.8 + self.height() * 0.1)
diff --git a/main.py b/main.py
index 428c508..0139fb2 100644
--- a/main.py
+++ b/main.py
@@ -3,7 +3,7 @@
import sys
LLM = False
-FACE_DETECTION = False
+FACE_TRACKING = True
if __name__ == "__main__":
@@ -11,14 +11,14 @@
window = MainWindow()
- if FACE_DETECTION:
- from modules.face_detection import FaceDetection
+ if FACE_TRACKING:
+ from modules.face_tracking import FaceTracking
- facedetection = FaceDetection()
+ faceTracking = FaceTracking()
- window.signal_start_detection.connect(facedetection.start_detection)
- window.signal_stop_detection.connect(facedetection.stop_detection)
- facedetection.sender_pose.connect(window.face.face_detectio_target)
+ window.signal_start_tracking.connect(faceTracking.start_tracking)
+ window.signal_stop_tracking.connect(faceTracking.stop_tracking)
+ faceTracking.sender_pose.connect(window.face.face_tracking_target)
if LLM:
from modules.sst import SpeechToText
diff --git a/modules/face_tracking.py b/modules/face_tracking.py
new file mode 100644
index 0000000..ac55637
--- /dev/null
+++ b/modules/face_tracking.py
@@ -0,0 +1,60 @@
+import cv2
+from cvzone.FaceDetectionModule import FaceDetector
+from PyQt6.QtCore import pyqtSignal, QObject
+from threading import Thread
+
+
+class FaceTracking(QObject):
+
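+ # Emits the detected face position as (x, y) normalized to [0, 1]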
+ sender_pose = pyqtSignal(float, float)
+
+ def __init__(self):
+ super().__init__()
+ self.detector = FaceDetector()
+ self.state = 0
+
+ def start_tracking(self):
+ self.state = 1
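+ # Run the capture loop in a daemon thread so it never blocks the Qt event loop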
+ connection_thread = Thread(target=self.track, daemon=True)
+ connection_thread.start()
+
+ def stop_tracking(self):
+ self.state = 0
+
+ def track(self):
+ cap = cv2.VideoCapture(0)
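+ # Open the default webcam (device 0)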
+ while self.state:
+ ret, frame = cap.read()
+ frame = cv2.flip(frame, 1)
+ img, faces = self.detector.findFaces(frame, draw=False)
+ if faces:
+ x, y = faces[0]["center"][0], faces[0]["center"][1]
+ # Show the camera and a point in the center of the face
+ # cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)
+ # cv2.imshow("Frame", frame)
+ # if cv2.waitKey(1) & 0xFF == ord('q'):
+ # break
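+ # Normalize the pixel coordinates, assuming the default 640x480 capture size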
+ self.sender_pose.emit(
+ x / 640,
+ y / 480
+ )
+
+ else:
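+ # No face detected: re-center the gaze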
+ self.sender_pose.emit(0.5, 0.5)
+ cap.release()
+ cv2.destroyAllWindows()
diff --git a/modules/main_window.py b/modules/main_window.py
index cef6cb1..44e7ff3 100644
--- a/modules/main_window.py
+++ b/modules/main_window.py
@@ -11,8 +11,8 @@
class MainWindow(QMainWindow):
- signal_start_detection = pyqtSignal()
- signal_stop_detection = pyqtSignal()
+ signal_start_tracking = pyqtSignal()
+ signal_stop_tracking = pyqtSignal()
signal_start_listening = pyqtSignal()
signal_ajust_noise = pyqtSignal()
@@ -37,8 +37,8 @@ def set_layout(self):
self.webview.load(QUrl("https://v2.ubicate.osuc.dev/map?place=B12"))
self.webview.hide()
- # self.layout.addWidget(self.face, 3)
- # self.layout.addWidget(self.webview, 20)
+ self.layout.addWidget(self.face, 3)
+ self.layout.addWidget(self.webview, 20)
self.h_layout.addWidget(self.face, 1)
self.h_layout.addWidget(self.webview, 1)
@@ -86,10 +86,11 @@ def keyPressEvent(self, event):
if event.key() == Qt.Key.Key_D:
self.face.setMouseTracking(False)
- self.signal_start_detection.emit()
+ self.signal_start_tracking.emit()
+
if event.key() == Qt.Key.Key_S:
self.face.setMouseTracking(True)
- self.signal_stop_detection.emit()
+ self.signal_stop_tracking.emit()
if event.key() == Qt.Key.Key_L:
self.signal_start_listening.emit()
diff --git a/requirements.txt b/requirements.txt
index e9dc665..b924b66 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,4 +7,6 @@ PyAudio == 0.2.14
pyttsx3 == 2.91
ollama == 0.3.1
PyQt6 == 6.7.0
-torch == 2.4.0+cu124
\ No newline at end of file
+torch == 2.4.0+cu124
+cvzone == 1.6.1
+mediapipe == 0.10.14
\ No newline at end of file