diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..53d4cb0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,169 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+#   in version control.
+#   https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# Data
+data/
+
+# Models
+models/
+*.pt
+
+# D2-Net
+d2-net/
+
+# results
+results/
+
+# scratch
+scratch/
\ No newline at end of file
diff --git a/README.md b/README.md
index 185b8c6..ca7becf 100644
--- a/README.md
+++ b/README.md
@@ -192,3 +192,5 @@ python train.py --save-path /path/to/fast-model.pt --net 'Fast_Quad_L2Net_ConfCF
 ```
 Note that you can fully configure the training (i.e. select the data sources, change the batch size, learning rate, number of epochs etc.). One easy way to improve the model is to train for more epochs, e.g. `--epochs 50`.
 For more details about all parameters, run `python train.py --help`.
+
+
diff --git a/experiment_SVDface.py b/experiment_SVDface.py
new file mode 100644
index 0000000..0aa0e12
--- /dev/null
+++ b/experiment_SVDface.py
@@ -0,0 +1,147 @@
+import os
+import time
+import csv
+import cv2
+import torch
+import numpy as np
+
+
+SEED = 123
+np.random.seed(SEED)
+
+'''
+Sliding ROI anchored at (x1, y1), with opposite corner (x2, y2):
+
+-------------------------------------------
+|                                         |
+|   (x1, y1)                              |
+|      ------------------------           |
+|      |                      |           |
+|      |                      |           |
+|      |         ROI          |           |
+|      |                      |           |
+|      |                      |           |
+|      |                      |           |
+|      ------------------------           |
+|                    (x2, y2)             |
+|                                         |
+|                                         |
+|                                         |
+-------------------------------------------
+'''
+
+def compute_svd(data_dir, N, tau, device):
+    '''
+    Input:
+        data_dir: path to the image
+        N:        size of the ROI
+        tau:      relaxation of the singular values
+        device:   CPU or GPU
+    '''
+    ## Load the image as grayscale
+    img = cv2.imread(data_dir, 0)
+    img = img.astype(float)
+    print(img.shape)
+    height, width = img.shape
+    ## Ones matrix holding the I2 representation (unvisited border cells keep the value 1)
+    SF = torch.ones(height, width, device=device)
+
+    img_t = torch.from_numpy(img)
+    img_t = img_t.to(device)  # .to() is not in-place; the result must be assigned
+    ROI_number = 0
+    for i in range(height - N + 1):  # bound the loops so every ROI is a full NxN block
+        for j in range(width - N + 1):
+
+            ROI = img_t[i:i+N, j:j+N]
+            # print(ROI.shape)
+            u, s, v_t = torch.linalg.svd(ROI, full_matrices=False)  ## SVD: ROI -> [m,n]; u -> [m,k]; s -> [k]; v_t -> [k,n], with k = min(m,n)
+            ## relaxation of the singular values
+            s += tau
+
+            ## Normalize the largest singular value
+            index_max = torch.argmax(s)
+            s_sum = torch.sum(s)
+            SF[i, j] = s[index_max] / s_sum
+
+            ROI_number += 1
+
+    svd_img = SF.cpu().detach().numpy()
+    ## Scale to 0 - 255
+    svd_img_scaled = ((svd_img - svd_img.min()) * (1/(svd_img.max() - svd_img.min()) * 255)).astype('uint8')
+
+    return svd_img_scaled
+
+
+def main():
+    start = time.time()
+    torch.manual_seed(0)
+    ## SVDFace Hyperparameters
+    # N = 10
+    # tau = 80
+
+    ## SVD Face paper settings
+    N = 3
+    # tau = 5
+    # tau = 10
+    tau = 20
+
+    data_dir = './data/oxbuild_images-v1/'
+
+    ## CUDA for pytorch (constant across images, so set it up once)
+    use_cuda = torch.cuda.is_available()
+    device = torch.device("cuda:0" if use_cuda else "cpu")
+
+    for filename in os.listdir(data_dir):
+        imagename = filename.split('.')[0]
+        print(imagename)
+        dir_filename = os.path.join(data_dir, filename)
+        print(dir_filename)
+
+        ## Compute the SVD transform
+        svd_img_scaled = compute_svd(dir_filename, N, tau, device)
+
+        # Write the SVDFace image
+        filename1 = './results/oxbuild_images-v1_svd_n3t20/{}_SVDtau{}.jpg'.format(imagename, tau)
+        cv2.imwrite(filename1, svd_img_scaled)
+        print('Image saved: {}'.format(filename1))
+
+    end = time.time()
+    print('Time elapsed:\t', end - start)
+
+    ## Uncomment to resume from a checkpoint (csv file written by write_checkpoint.py)
+    # todo_dir = './results/to_do.csv'
+    #
+    # ## Read the csv file of images that still need processing
+    # with open(todo_dir, newline='') as f:
+    #     reader = csv.reader(f)
+    #     ls_data = list(reader)
+    # print(len(ls_data[0]))
+    #
+    # for filename in ls_data[0]:
+    #     imagename = filename.split('.')[0]
+    #     print(imagename)
+    #     dir_filename = os.path.join(data_dir, filename)
+    #     print(dir_filename)
+    #
+    #     ## Compute the SVD transform
+    #     svd_img_scaled = compute_svd(dir_filename, N, tau, device)
+    #
+    #     # Write the SVDFace image
+    #     filename1 = './results/oxbuild_images-v1_svd_n3t20/{}_SVDtau{}.jpg'.format(imagename, tau)
+    #     cv2.imwrite(filename1, svd_img_scaled)
+    #     print('Image saved: {}'.format(filename1))
+    #
+    # end = time.time()
+    # print('Time elapsed:\t', end - start)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
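Aside: `compute_svd` above runs one Python-level SVD per pixel, so a single Oxford-buildings image costs hundreds of thousands of SVD calls. Below is a minimal batched sketch of the same per-ROI statistic, assuming PyTorch >= 1.9 for `torch.linalg.svdvals`; `svd_response` is an illustrative name, not part of this patch, and it covers only the interior pixels that the loop fills in.

```python
import torch

def svd_response(img_t: torch.Tensor, N: int, tau: float) -> torch.Tensor:
    '''Batched form of the per-ROI loop in compute_svd (illustrative sketch).

    img_t: [H, W] float tensor. Returns an [H-N+1, W-N+1] map of
    (sigma_max + tau) / sum_i(sigma_i + tau) over every NxN patch.
    '''
    # Two unfolds enumerate all sliding NxN windows: [H-N+1, W-N+1, N, N].
    patches = img_t.unfold(0, N, 1).unfold(1, N, 1).contiguous()
    # svdvals batches over the leading dims and returns singular values
    # in descending order: [H-N+1, W-N+1, N].
    s = torch.linalg.svdvals(patches) + tau
    return s[..., 0] / s.sum(dim=-1)
```

The trade-off is memory: the patch tensor holds roughly N² copies of the image, which is modest for the paper setting N = 3.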
diff --git a/experiment_load.py b/experiment_load.py
new file mode 100644
index 0000000..b901f92
--- /dev/null
+++ b/experiment_load.py
@@ -0,0 +1,10 @@
+## Load an .r2d2 npz file
+import numpy as np
+
+target_file = "/home/pfvaldez/Development/r2d2/d2-net/hpatches_sequences/hpatches-sequences-release/i_ajuntament/1.ppm.r2d2"
+npz = np.load(target_file, mmap_mode='r')
+# npz = np.load(cache_file)
+# print(npz)
+
+d = dict(zip(("data1{}".format(k) for k in npz), (npz[k] for k in npz)))
+print(d)
\ No newline at end of file
diff --git a/experiment_loadcache.py b/experiment_loadcache.py
new file mode 100644
index 0000000..a8664e5
--- /dev/null
+++ b/experiment_loadcache.py
@@ -0,0 +1,12 @@
+import numpy as np
+
+cache_file = "/home/pfvaldez/Development/r2d2/d2-net/hpatches_sequences/cache/superpoint.npy"
+data = np.load(cache_file, allow_pickle=True)
+print(data)
+# f = io.open(cache_file, mode="r", encoding="utf-8")
+# file_list = []
+# for line in f.readlines():
+#     file_list.append(line.strip())  # strip the newline character at the end of each line
+# for i in sorted(file_list):
+#     print(i)
+# f.close()
diff --git a/extract.py b/extract.py
index c3fea02..b7224ed 100644
--- a/extract.py
+++ b/extract.py
@@ -2,7 +2,7 @@
 # CC BY-NC-SA 3.0
 # Available only for non-commercial use
 
-
+import time
 import os, pdb
 from PIL import Image
 import numpy as np
@@ -163,7 +163,7 @@ def extract_keypoints(args):
     parser.add_argument("--model", type=str, required=True, help='model path')
 
     parser.add_argument("--images", type=str, required=True, nargs='+', help='images / list')
-    parser.add_argument("--tag", type=str, default='r2d2', help='output file tag')
+    parser.add_argument("--tag", type=str, default='i_r2d2', help='output file tag')  # Changed tag to `i_r2d2` to track experiments
 
     parser.add_argument("--top-k", type=int, default=5000, help='number of keypoints')
@@ -179,5 +179,9 @@ def extract_keypoints(args):
     parser.add_argument("--gpu", type=int, nargs='+', default=[0], help='use -1 for CPU')
 
     args = parser.parse_args()
+    start_time = time.time()
     extract_keypoints(args)
+    end_time = time.time()
+    print("Finished extract, time in seconds =", end_time - start_time)
+
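Both `extract.py` above and `train.py` below paste in the same start/end timing pattern. A reusable context manager, sketched here as a suggestion rather than as part of the patch, would keep each call site to one line:

```python
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    '''Print the wall-clock duration of the enclosed block.'''
    start = time.time()
    try:
        yield
    finally:
        print('{}: {:.2f} s'.format(label, time.time() - start))

# usage, e.g. in extract.py:
#   with timed('extract_keypoints'):
#       extract_keypoints(args)
```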
diff --git a/train.py b/train.py
index 10d23d9..fc8707a 100644
--- a/train.py
+++ b/train.py
@@ -1,7 +1,7 @@
 # Copyright 2019-present NAVER Corp.
 # CC BY-NC-SA 3.0
 # Available only for non-commercial use
-
+import time
 import os, pdb
 import torch
 import torch.optim as optim
@@ -70,6 +70,9 @@ def forward_backward(self, inputs):
 
 
 if __name__ == '__main__':
+
+    torch.cuda.empty_cache()
+
     import argparse
     parser = argparse.ArgumentParser("Train R2D2")
 
@@ -86,7 +89,7 @@ def forward_backward(self, inputs):
     parser.add_argument("--N", type=int, default=16, help="patch size for repeatability")
 
     parser.add_argument("--epochs", type=int, default=25, help='number of training epochs')
-    parser.add_argument("--batch-size", "--bs", type=int, default=8, help="batch size")
+    parser.add_argument("--batch-size", "--bs", type=int, default=4, help="batch size")
     parser.add_argument("--learning-rate", "--lr", type=str, default=1e-4)
     parser.add_argument("--weight-decay", "--wd", type=float, default=5e-4)
@@ -98,6 +101,9 @@ def forward_backward(self, inputs):
     iscuda = common.torch_set_gpu(args.gpu)
     common.mkdir_for(args.save_path)
 
+    start_time = time.time()
+
+    ## TO DO: Insert SVD here
     # Create data loader
     from datasets import *
     db = [data_sources[key] for key in args.train_data]
@@ -132,6 +138,8 @@ def forward_backward(self, inputs):
         print(f"\n>> Starting epoch {epoch}...")
         train()
+    end_time = time.time()
+    print("Total training time in seconds =", end_time - start_time)  # elapsed wall-clock time, not time since the Unix epoch
 
     print(f"\n>> Saving model to {args.save_path}")
     torch.save({'net': args.net, 'state_dict': net.state_dict()}, args.save_path)
diff --git a/write_checkpoint.py b/write_checkpoint.py
new file mode 100644
index 0000000..9ebca08
--- /dev/null
+++ b/write_checkpoint.py
@@ -0,0 +1,29 @@
+'''
+This script writes a csv file listing the images that have not yet been processed,
+so that experiment_SVDface.py can resume from where it left off.
+'''
+import os
+import re
+import csv
+
+data_dir = './data/oxbuild_images-v1/'
+ls_data = os.listdir(data_dir)
+print(len(ls_data))
+
+# List the files already written to the results directory
+ls_incomplete = os.listdir('./results/oxbuild_images-v1_svd_n3t20/')
+# Strip the `_SVDtau20` suffix so result names match the source image names
+pattern = re.compile(r'_SVDtau20')
+for i in range(len(ls_incomplete)):
+    ls_incomplete[i] = pattern.sub('', ls_incomplete[i])
+
+print(len(ls_incomplete))
+
+# Set difference: source images with no corresponding result yet
+to_do = list(set(ls_data) - set(ls_incomplete))
+print(len(to_do))
+# Write the remaining image names as a single quoted csv row
+with open('./results/to_do.csv', 'w') as myfile:
+    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
+    wr.writerow(to_do)
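`write_checkpoint.py` writes the pending filenames as a single quoted CSV row, and the resume block in `experiment_SVDface.py` reads them back with `csv.reader` and takes row 0. A quick round-trip check, assuming `write_checkpoint.py` has already produced `./results/to_do.csv`:

```python
import csv

with open('./results/to_do.csv', newline='') as f:
    to_do = list(csv.reader(f))[0]  # one row of filenames, as written by writerow()
print(len(to_do), 'images left to process')
print(to_do[:5])
```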