-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathinference.py
More file actions
49 lines (39 loc) · 1.49 KB
/
inference.py
File metadata and controls
49 lines (39 loc) · 1.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
from tqdm import tqdm
import gc
import os
import numpy as np
import cv2
from PIL import Image
from torchvision import transforms
import torch
def get_query_embedding(model, device, query_img_file):
    """Compute the embedding of a single query image via five-crop averaging.

    The image is resized, five 448x448 crops are taken (four corners plus
    center), each crop is embedded by the model, and the five embeddings are
    averaged into one vector.

    Args:
        model: network exposing a ``get_embedding`` method; switched to eval
            mode here.
        device: torch device ("cuda" or "cpu") the input batch is moved to.
        query_img_file: filesystem path of the query image.

    Returns:
        A 1-D tensor holding the mean of the five crop embeddings.
    """
    model.eval()

    # ImageNet normalization statistics.
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    # Resize, take five 448x448 crops, then convert each crop to a
    # normalized tensor and stack them into one (5, C, H, W) batch.
    preprocess = transforms.Compose([
        transforms.Resize(460),
        transforms.FiveCrop(448),
        transforms.Lambda(
            lambda crops: torch.stack([
                transforms.Normalize(mean=norm_mean, std=norm_std)(
                    transforms.ToTensor()(crop)
                )
                for crop in crops
            ])
        ),
    ])

    batch = preprocess(Image.open(query_img_file).convert("RGB"))

    with torch.no_grad():
        batch = batch.to(device)
        n_crops, channels, height, width = batch.size()
        # Embed all five crops in one forward pass, then average.
        crop_embeddings = model.get_embedding(
            batch.view(-1, channels, height, width)
        )
        return crop_embeddings.view(n_crops, -1).mean(0)