diff --git a/.gitignore b/.gitignore deleted file mode 100644 index a3d41ec..0000000 --- a/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.pth - -dump_match_pairs - diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 0bb117c..0000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 onandon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/README.md b/README.md deleted file mode 100644 index bf9752c..0000000 --- a/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# SuperCATs -For more information, check out the paper on [paper link](https://ieeexplore.ieee.org/document/9954872). Also check out project page here [Project Page link].
-*This paper is accepted in ICCE-Asia'22* - - - ->**Cost Aggregation with Transformers for Sparse Correspondence**

->Abstract : In this work, we introduce a novel network, namely SuperCATs, which aims to find a correspondence field between visually similar images. SuperCATs stands on the shoulder of the recently proposed matching networks, SuperGlue and CATs, taking the merits of both for constructing an integrative framework. Specifically, given keypoints and corresponding descriptors, we first apply attentional aggregation consisting of self- and cross- graph neural network to obtain feature descriptors. Subsequently, we construct a cost volume using the descriptors, which then undergoes a tranformer aggregator for cost aggregation. With this approach, we manage to replace the handcrafted module based on solving an optimal transport problem initially included in SuperGlue with a transformer well known for its global receptive fields, making our approach more robust to severe deformations. We conduct experiments to demonstrate the effectiveness of the proposed method, and show that the proposed model is on par with SuperGlue for both indoor and outdoor scenes. - - -# Network -Overview of our model is illustrated below: -![overview](fig/overview.png) -Structure of Transformer Aggregator is illustrated below: -![aggregator](fig/aggregator.png) - -# Training -To train the SuperGlue with default parameters, run the following command: -``` -python train.py -``` -Additional useful command line parameters - -* Use `--epoch` to set the number of epochs (default: `20`). -* Use `--train_path` to set the path to the directory of training images. -* Use `--eval_output_dir` to set the path to the directory in which the visualizations is written (default: `dump_match_pairs/`). -* Use `--show_keypoints` to visualize the detected keypoints (default: `False`). -* Use `--viz_extension` to set the visualization file extension (default: `png`). Use pdf for highest-quality. 
- -# BibTex -If you find this research useful, please consider citing: -```BibTex -@inproceedings{lee2022cost, - title={Cost Aggregation with Transformers for Sparse Correspondence}, - author={Lee, Seungjun and An, Seungjun and Hong, Sunghwan and Cho, Seokju and Nam, Jisu and Hong, Susung and Kim, Seungryong}, - booktitle={2022 IEEE International Conference on Consumer Electronics-Asia (ICCE-Asia)}, - pages={1--4}, - year={2022}, - organization={IEEE} -} -``` diff --git a/css/app.css b/css/app.css new file mode 100644 index 0000000..851c71c --- /dev/null +++ b/css/app.css @@ -0,0 +1,159 @@ +/* latin-ext */ +@font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 400; + src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v15/S6u8w4BMUTPHjxsAUi-qNiXg7eU0.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; + } + /* latin */ + @font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 400; + src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v15/S6u8w4BMUTPHjxsAXC-qNiXg7Q.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; + } + /* latin-ext */ + @font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 700; + src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v15/S6u_w4BMUTPHjxsI5wq_FQftx9897sxZ.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; + } + /* latin */ + @font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 700; + src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v15/S6u_w4BMUTPHjxsI5wq_Gwftx9897g.woff2) 
format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; + } + /* latin-ext */ + @font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 400; + src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v15/S6uyw4BMUTPHjxAwXiWtFCfQ7A.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; + } + /* latin */ + @font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 400; + src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v15/S6uyw4BMUTPHjx4wXiWtFCc.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; + } + /* latin-ext */ + @font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 700; + src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v15/S6u9w4BMUTPHh6UVSwaPGQ3q5d0N7w.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; + } + /* latin */ + @font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 700; + src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v15/S6u9w4BMUTPHh6UVSwiPGQ3q5d0.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; + } + + a { + color: #1772d0; + text-decoration: none; + } + + a:focus, + a:hover { + color: #f09228; + text-decoration: none; + } + + body, + td, + th, + tr, + p, + a { + font-family: 'Lato', Verdana, Helvetica, sans-serif; + font-size: 14px + } + + 
strong { + font-family: 'Lato', Verdana, Helvetica, sans-serif; + font-size: 14px; + } + + heading { + font-family: 'Lato', Verdana, Helvetica, sans-serif; + font-size: 22px; + } + + papertitle { + font-family: 'Lato', Verdana, Helvetica, sans-serif; + font-size: 14px; + font-weight: 700 + } + + name { + font-family: 'Lato', Verdana, Helvetica, sans-serif; + font-size: 32px; + } + + .one { + width: 160px; + height: 160px; + position: relative; + } + + .two { + width: 160px; + height: 160px; + position: absolute; + transition: opacity .2s ease-in-out; + -moz-transition: opacity .2s ease-in-out; + -webkit-transition: opacity .2s ease-in-out; + } + + .fade { + transition: opacity .2s ease-in-out; + -moz-transition: opacity .2s ease-in-out; + -webkit-transition: opacity .2s ease-in-out; + } + + span.highlight { + background-color: #ffffd0; + } + + .CodeMirror { + font-size: .8em; + height: auto; + } + + .CodeMirror-scroll { + overflow-y: hidden; + overflow-x: auto; + } + + #header_img { + margin-top: 2em; + margin-bottom: 1em; + } + + .list-inline { + list-style: none; + margin-left: -0.5em; + margin-right: -0.5em; + padding-left: 0; + } + + .list-inline > li { + display: inline-block; + margin-left: 0.5em; + margin-right: 0.5em; + } \ No newline at end of file diff --git a/css/bootstrap.min.css b/css/bootstrap.min.css new file mode 100644 index 0000000..2df367c --- /dev/null +++ b/css/bootstrap.min.css @@ -0,0 +1,11 @@ +@import url("https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700");/*! + * bootswatch v3.3.5 + * Homepage: http://bootswatch.com + * Copyright 2012-2015 Thomas Park + * Licensed under MIT + * Based on Bootstrap +*//*! + * Bootstrap v3.3.5 (http://getbootstrap.com) + * Copyright 2011-2015 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + *//*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}h1{font-size:2em;margin:0.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace, monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type="number"]::-webkit-inner-spin-button,input[type="number"]::-webkit-outer-spin-button{height:auto}input[type="search"]{-webkit-appearance:textfield;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid #c0c0c0;margin:0 2px;padding:0.35em 0.625em 
0.75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:bold}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,*:before,*:after{background:transparent !important;color:#000 !important;-webkit-box-shadow:none !important;box-shadow:none !important;text-shadow:none !important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="#"]:after,a[href^="javascript:"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000 !important}.label{border:1px solid #000}.table{border-collapse:collapse !important}.table td,.table th{background-color:#fff !important}.table-bordered th,.table-bordered td{border:1px solid #ddd !important}}@font-face{font-family:'Glyphicons Halflings';src:url('../fonts/glyphicons-halflings-regular.eot');src:url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'),url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'),url('../fonts/glyphicons-halflings-regular.woff') format('woff'),url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'),url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:normal;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before,.glyphicon-eur:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.gl
yphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:
before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}
.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{co
ntent:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:befor
e{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}*:before,*:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Source Sans 
Pro",Calibri,Candara,Arial,sans-serif;font-size:15px;line-height:1.42857143;color:#333333;background-color:#ffffff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#2780e3;text-decoration:none}a:hover,a:focus{color:#165ba8;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive,.thumbnail>img,.thumbnail a>img,.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:0}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#ffffff;border:1px solid #dddddd;border-radius:0;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:21px;margin-bottom:21px;border:0;border-top:1px solid #e6e6e6}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role="button"]{cursor:pointer}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:"Source Sans Pro",Calibri,Candara,Arial,sans-serif;font-weight:300;line-height:1.1;color:inherit}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small,h1 .small,h2 .small,h3 .small,h4 .small,h5 .small,h6 .small,.h1 .small,.h2 .small,.h3 .small,.h4 .small,.h5 .small,.h6 .small{font-weight:normal;line-height:1;color:#999999}h1,.h1,h2,.h2,h3,.h3{margin-top:21px;margin-bottom:10.5px}h1 small,.h1 small,h2 small,.h2 small,h3 small,.h3 small,h1 .small,.h1 .small,h2 .small,.h2 .small,h3 .small,.h3 .small{font-size:65%}h4,.h4,h5,.h5,h6,.h6{margin-top:10.5px;margin-bottom:10.5px}h4 small,.h4 small,h5 small,.h5 small,h6 small,.h6 small,h4 
.small,.h4 .small,h5 .small,.h5 .small,h6 .small,.h6 .small{font-size:75%}h1,.h1{font-size:39px}h2,.h2{font-size:32px}h3,.h3{font-size:26px}h4,.h4{font-size:19px}h5,.h5{font-size:15px}h6,.h6{font-size:13px}p{margin:0 0 10.5px}.lead{margin-bottom:21px;font-size:17px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:22.5px}}small,.small{font-size:86%}mark,.mark{background-color:#ff7518;padding:.2em}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#999999}.text-primary{color:#2780e3}a.text-primary:hover,a.text-primary:focus{color:#1967be}.text-success{color:#ffffff}a.text-success:hover,a.text-success:focus{color:#e6e6e6}.text-info{color:#ffffff}a.text-info:hover,a.text-info:focus{color:#e6e6e6}.text-warning{color:#ffffff}a.text-warning:hover,a.text-warning:focus{color:#e6e6e6}.text-danger{color:#ffffff}a.text-danger:hover,a.text-danger:focus{color:#e6e6e6}.bg-primary{color:#fff;background-color:#2780e3}a.bg-primary:hover,a.bg-primary:focus{background-color:#1967be}.bg-success{background-color:#3fb618}a.bg-success:hover,a.bg-success:focus{background-color:#2f8912}.bg-info{background-color:#9954bb}a.bg-info:hover,a.bg-info:focus{background-color:#7e3f9d}.bg-warning{background-color:#ff7518}a.bg-warning:hover,a.bg-warning:focus{background-color:#e45c00}.bg-danger{background-color:#ff0039}a.bg-danger:hover,a.bg-danger:focus{background-color:#cc002e}.page-header{padding-bottom:9.5px;margin:42px 0 21px;border-bottom:1px solid #e6e6e6}ul,ol{margin-top:0;margin-bottom:10.5px}ul ul,ol ul,ul ol,ol 
ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none;margin-left:-5px}.list-inline>li{display:inline-block;padding-left:5px;padding-right:5px}dl{margin-top:0;margin-bottom:21px}dt,dd{line-height:1.42857143}dt{font-weight:bold}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;clear:left;text-align:right;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999999}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10.5px 21px;margin:0 0 21px;font-size:18.75px;border-left:5px solid #e6e6e6}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}blockquote footer,blockquote small,blockquote .small{display:block;font-size:80%;line-height:1.42857143;color:#999999}blockquote footer:before,blockquote small:before,blockquote .small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #e6e6e6;border-left:0;text-align:right}.blockquote-reverse footer:before,blockquote.pull-right footer:before,.blockquote-reverse small:before,blockquote.pull-right small:before,.blockquote-reverse .small:before,blockquote.pull-right .small:before{content:''}.blockquote-reverse footer:after,blockquote.pull-right footer:after,.blockquote-reverse small:after,blockquote.pull-right small:after,.blockquote-reverse .small:after,blockquote.pull-right .small:after{content:'\00A0 \2014'}address{margin-bottom:21px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:0}kbd{padding:2px 4px;font-size:90%;color:#ffffff;background-color:#333333;border-radius:0;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.25);box-shadow:inset 0 -1px 
0 rgba(0,0,0,0.25)}kbd kbd{padding:0;font-size:100%;font-weight:bold;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:10px;margin:0 0 10.5px;font-size:14px;line-height:1.42857143;word-break:break-all;word-wrap:break-word;color:#333333;background-color:#f5f5f5;border:1px solid #cccccc;border-radius:0}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-xs-1,.col-sm-1,.col-md-1,.col-lg-1,.col-xs-2,.col-sm-2,.col-md-2,.col-lg-2,.col-xs-3,.col-sm-3,.col-md-3,.col-lg-3,.col-xs-4,.col-sm-4,.col-md-4,.col-lg-4,.col-xs-5,.col-sm-5,.col-md-5,.col-lg-5,.col-xs-6,.col-sm-6,.col-md-6,.col-lg-6,.col-xs-7,.col-sm-7,.col-md-7,.col-lg-7,.col-xs-8,.col-sm-8,.col-md-8,.col-lg-8,.col-xs-9,.col-sm-9,.col-md-9,.col-lg-9,.col-xs-10,.col-sm-10,.col-md-10,.col-lg-10,.col-xs-11,.col-sm-11,.col-md-11,.col-lg-11,.col-xs-12,.col-sm-12,.col-md-12,.col-lg-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{rig
ht:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0%}@media 
(min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0%}}@media 
(min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0%}}@media 
(min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0%}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#999999;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:21px}.table>thead>tr>th,.table>tbody>tr>th,.table>tfoot>tr>th,.ta
ble>thead>tr>td,.table>tbody>tr>td,.table>tfoot>tr>td{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #dddddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #dddddd}.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>th,.table>caption+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>td,.table>thead:first-child>tr:first-child>td{border-top:0}.table>tbody+tbody{border-top:2px solid #dddddd}.table .table{background-color:#ffffff}.table-condensed>thead>tr>th,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>tbody>tr>td,.table-condensed>tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #dddddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #dddddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*="col-"]{position:static;float:none;display:table-column}table td[class*="col-"],table 
th[class*="col-"]{position:static;float:none;display:table-cell}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover,.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr.active:hover>th{background-color:#e8e8e8}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#3fb618}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr.success:hover>th{background-color:#379f15}.table>thead>tr>td.info,.table>tbody>tr>td.info,.table>tfoot>tr>td.info,.table>thead>tr>th.info,.table>tbody>tr>th.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>tbody>tr.info>td,.table>tfoot>tr.info>td,.table>thead>tr.info>th,.table>tbody>tr.info>th,.table>tfoot>tr.info>th{background-color:#9954bb}.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover,.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr.info:hover>th{background-color:#8d46b0}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table
>tfoot>tr.warning>th{background-color:#ff7518}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr.warning:hover>th{background-color:#fe6600}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#ff0039}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr.danger:hover>th{background-color:#e60033}.table-responsive{overflow-x:auto;min-height:0.01%}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15.75px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#dddddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0;min-width:0}legend{display:block;width:100%;padding:0;margin-bottom:21px;font-size:22.5px;line-height:inherit;color:#333333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:bold}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type="file"]{display:block}input[type="range"]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin 
dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:11px;font-size:15px;line-height:1.42857143;color:#333333}.form-control{display:block;width:100%;height:43px;padding:10px 18px;font-size:15px;line-height:1.42857143;color:#333333;background-color:#ffffff;background-image:none;border:1px solid #cccccc;border-radius:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6)}.form-control::-moz-placeholder{color:#999999;opacity:1}.form-control:-ms-input-placeholder{color:#999999}.form-control::-webkit-input-placeholder{color:#999999}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#e6e6e6;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}input[type="search"]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type="date"].form-control,input[type="time"].form-control,input[type="datetime-local"].form-control,input[type="month"].form-control{line-height:43px}input[type="date"].input-sm,input[type="time"].input-sm,input[type="datetime-local"].input-sm,input[type="month"].input-sm,.input-group-sm input[type="date"],.input-group-sm input[type="time"],.input-group-sm input[type="datetime-local"],.input-group-sm input[type="month"]{line-height:31px}input[type="date"].input-lg,input[type="time"].input-lg,input[type="datetime-local"].input-lg,input[type="month"].input-lg,.input-group-lg 
input[type="date"],.input-group-lg input[type="time"],.input-group-lg input[type="datetime-local"],.input-group-lg input[type="month"]{line-height:64px}}.form-group{margin-bottom:15px}.radio,.checkbox{position:relative;display:block;margin-top:10px;margin-bottom:10px}.radio label,.checkbox label{min-height:21px;padding-left:20px;margin-bottom:0;font-weight:normal;cursor:pointer}.radio input[type="radio"],.radio-inline input[type="radio"],.checkbox input[type="checkbox"],.checkbox-inline input[type="checkbox"]{position:absolute;margin-left:-20px;margin-top:4px \9}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;vertical-align:middle;font-weight:normal;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type="radio"][disabled],input[type="checkbox"][disabled],input[type="radio"].disabled,input[type="checkbox"].disabled,fieldset[disabled] input[type="radio"],fieldset[disabled] input[type="checkbox"]{cursor:not-allowed}.radio-inline.disabled,.checkbox-inline.disabled,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.radio.disabled label,.checkbox.disabled label,fieldset[disabled] .radio label,fieldset[disabled] .checkbox label{cursor:not-allowed}.form-control-static{padding-top:11px;padding-bottom:11px;margin-bottom:0;min-height:36px}.form-control-static.input-lg,.form-control-static.input-sm{padding-left:0;padding-right:0}.input-sm{height:31px;padding:5px 10px;font-size:13px;line-height:1.5;border-radius:0}select.input-sm{height:31px;line-height:31px}textarea.input-sm,select[multiple].input-sm{height:auto}.form-group-sm .form-control{height:31px;padding:5px 10px;font-size:13px;line-height:1.5;border-radius:0}.form-group-sm select.form-control{height:31px;line-height:31px}.form-group-sm textarea.form-control,.form-group-sm 
select[multiple].form-control{height:auto}.form-group-sm .form-control-static{height:31px;min-height:34px;padding:6px 10px;font-size:13px;line-height:1.5}.input-lg{height:64px;padding:18px 30px;font-size:19px;line-height:1.3333333;border-radius:0}select.input-lg{height:64px;line-height:64px}textarea.input-lg,select[multiple].input-lg{height:auto}.form-group-lg .form-control{height:64px;padding:18px 30px;font-size:19px;line-height:1.3333333;border-radius:0}.form-group-lg select.form-control{height:64px;line-height:64px}.form-group-lg textarea.form-control,.form-group-lg select[multiple].form-control{height:auto}.form-group-lg .form-control-static{height:64px;min-height:40px;padding:19px 30px;font-size:19px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:53.75px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:43px;height:43px;line-height:43px;text-align:center;pointer-events:none}.input-lg+.form-control-feedback,.input-group-lg+.form-control-feedback,.form-group-lg .form-control+.form-control-feedback{width:64px;height:64px;line-height:64px}.input-sm+.form-control-feedback,.input-group-sm+.form-control-feedback,.form-group-sm .form-control+.form-control-feedback{width:31px;height:31px;line-height:31px}.has-success .help-block,.has-success .control-label,.has-success .radio,.has-success .checkbox,.has-success .radio-inline,.has-success .checkbox-inline,.has-success.radio label,.has-success.checkbox label,.has-success.radio-inline label,.has-success.checkbox-inline label{color:#ffffff}.has-success .form-control{border-color:#ffffff;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-success .form-control:focus{border-color:#e6e6e6;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #fff;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #fff}.has-success 
.input-group-addon{color:#ffffff;border-color:#ffffff;background-color:#3fb618}.has-success .form-control-feedback{color:#ffffff}.has-warning .help-block,.has-warning .control-label,.has-warning .radio,.has-warning .checkbox,.has-warning .radio-inline,.has-warning .checkbox-inline,.has-warning.radio label,.has-warning.checkbox label,.has-warning.radio-inline label,.has-warning.checkbox-inline label{color:#ffffff}.has-warning .form-control{border-color:#ffffff;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-warning .form-control:focus{border-color:#e6e6e6;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #fff;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #fff}.has-warning .input-group-addon{color:#ffffff;border-color:#ffffff;background-color:#ff7518}.has-warning .form-control-feedback{color:#ffffff}.has-error .help-block,.has-error .control-label,.has-error .radio,.has-error .checkbox,.has-error .radio-inline,.has-error .checkbox-inline,.has-error.radio label,.has-error.checkbox label,.has-error.radio-inline label,.has-error.checkbox-inline label{color:#ffffff}.has-error .form-control{border-color:#ffffff;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-error .form-control:focus{border-color:#e6e6e6;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #fff;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #fff}.has-error .input-group-addon{color:#ffffff;border-color:#ffffff;background-color:#ff0039}.has-error .form-control-feedback{color:#ffffff}.has-feedback label~.form-control-feedback{top:26px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline 
.form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn,.form-inline .input-group .form-control{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .radio,.form-inline .checkbox{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .radio label,.form-inline .checkbox label{padding-left:0}.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{margin-top:0;margin-bottom:0;padding-top:11px}.form-horizontal .radio,.form-horizontal .checkbox{min-height:32px}.form-horizontal .form-group{margin-left:-15px;margin-right:-15px}@media (min-width:768px){.form-horizontal .control-label{text-align:right;margin-bottom:0;padding-top:11px}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:24.9999994px;font-size:19px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:13px}}.btn{display:inline-block;margin-bottom:0;font-weight:normal;text-align:center;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;background-image:none;border:1px solid transparent;white-space:nowrap;padding:10px 18px;font-size:15px;line-height:1.42857143;border-radius:0;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn:focus,.btn:active:focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn.active.focus{outline:thin dotted;outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus,.btn.focus{color:#ffffff;text-decoration:none}.btn:active,.btn.active{outline:0;background-image:none;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;opacity:0.65;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#ffffff;background-color:#222222;border-color:#222222}.btn-default:focus,.btn-default.focus{color:#ffffff;background-color:#090909;border-color:#000000}.btn-default:hover{color:#ffffff;background-color:#090909;border-color:#040404}.btn-default:active,.btn-default.active,.open>.dropdown-toggle.btn-default{color:#ffffff;background-color:#090909;border-color:#040404}.btn-default:active:hover,.btn-default.active:hover,.open>.dropdown-toggle.btn-default:hover,.btn-default:active:focus,.btn-default.active:focus,.open>.dropdown-toggle.btn-default:focus,.btn-default:active.focus,.btn-default.active.focus,.open>.dropdown-toggle.btn-default.focus{color:#ffffff;background-color:#000000;border-color:#000000}.btn-default:active,.btn-default.active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled.focus,.btn-default[disabled].focus,fieldset[disabled] .btn-default.focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#222222;border-color:#222222}.btn-default 
.badge{color:#222222;background-color:#ffffff}.btn-primary{color:#ffffff;background-color:#2780e3;border-color:#2780e3}.btn-primary:focus,.btn-primary.focus{color:#ffffff;background-color:#1967be;border-color:#10427b}.btn-primary:hover{color:#ffffff;background-color:#1967be;border-color:#1862b5}.btn-primary:active,.btn-primary.active,.open>.dropdown-toggle.btn-primary{color:#ffffff;background-color:#1967be;border-color:#1862b5}.btn-primary:active:hover,.btn-primary.active:hover,.open>.dropdown-toggle.btn-primary:hover,.btn-primary:active:focus,.btn-primary.active:focus,.open>.dropdown-toggle.btn-primary:focus,.btn-primary:active.focus,.btn-primary.active.focus,.open>.dropdown-toggle.btn-primary.focus{color:#ffffff;background-color:#15569f;border-color:#10427b}.btn-primary:active,.btn-primary.active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled.focus,.btn-primary[disabled].focus,fieldset[disabled] .btn-primary.focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#2780e3;border-color:#2780e3}.btn-primary 
.badge{color:#2780e3;background-color:#ffffff}.btn-success{color:#ffffff;background-color:#3fb618;border-color:#3fb618}.btn-success:focus,.btn-success.focus{color:#ffffff;background-color:#2f8912;border-color:#184509}.btn-success:hover{color:#ffffff;background-color:#2f8912;border-color:#2c8011}.btn-success:active,.btn-success.active,.open>.dropdown-toggle.btn-success{color:#ffffff;background-color:#2f8912;border-color:#2c8011}.btn-success:active:hover,.btn-success.active:hover,.open>.dropdown-toggle.btn-success:hover,.btn-success:active:focus,.btn-success.active:focus,.open>.dropdown-toggle.btn-success:focus,.btn-success:active.focus,.btn-success.active.focus,.open>.dropdown-toggle.btn-success.focus{color:#ffffff;background-color:#24690e;border-color:#184509}.btn-success:active,.btn-success.active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled.focus,.btn-success[disabled].focus,fieldset[disabled] .btn-success.focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#3fb618;border-color:#3fb618}.btn-success 
.badge{color:#3fb618;background-color:#ffffff}.btn-info{color:#ffffff;background-color:#9954bb;border-color:#9954bb}.btn-info:focus,.btn-info.focus{color:#ffffff;background-color:#7e3f9d;border-color:#522967}.btn-info:hover{color:#ffffff;background-color:#7e3f9d;border-color:#783c96}.btn-info:active,.btn-info.active,.open>.dropdown-toggle.btn-info{color:#ffffff;background-color:#7e3f9d;border-color:#783c96}.btn-info:active:hover,.btn-info.active:hover,.open>.dropdown-toggle.btn-info:hover,.btn-info:active:focus,.btn-info.active:focus,.open>.dropdown-toggle.btn-info:focus,.btn-info:active.focus,.btn-info.active.focus,.open>.dropdown-toggle.btn-info.focus{color:#ffffff;background-color:#6a3484;border-color:#522967}.btn-info:active,.btn-info.active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled.focus,.btn-info[disabled].focus,fieldset[disabled] .btn-info.focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#9954bb;border-color:#9954bb}.btn-info 
.badge{color:#9954bb;background-color:#ffffff}.btn-warning{color:#ffffff;background-color:#ff7518;border-color:#ff7518}.btn-warning:focus,.btn-warning.focus{color:#ffffff;background-color:#e45c00;border-color:#983d00}.btn-warning:hover{color:#ffffff;background-color:#e45c00;border-color:#da5800}.btn-warning:active,.btn-warning.active,.open>.dropdown-toggle.btn-warning{color:#ffffff;background-color:#e45c00;border-color:#da5800}.btn-warning:active:hover,.btn-warning.active:hover,.open>.dropdown-toggle.btn-warning:hover,.btn-warning:active:focus,.btn-warning.active:focus,.open>.dropdown-toggle.btn-warning:focus,.btn-warning:active.focus,.btn-warning.active.focus,.open>.dropdown-toggle.btn-warning.focus{color:#ffffff;background-color:#c04d00;border-color:#983d00}.btn-warning:active,.btn-warning.active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled.focus,.btn-warning[disabled].focus,fieldset[disabled] .btn-warning.focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#ff7518;border-color:#ff7518}.btn-warning 
.badge{color:#ff7518;background-color:#ffffff}.btn-danger{color:#ffffff;background-color:#ff0039;border-color:#ff0039}.btn-danger:focus,.btn-danger.focus{color:#ffffff;background-color:#cc002e;border-color:#80001c}.btn-danger:hover{color:#ffffff;background-color:#cc002e;border-color:#c2002b}.btn-danger:active,.btn-danger.active,.open>.dropdown-toggle.btn-danger{color:#ffffff;background-color:#cc002e;border-color:#c2002b}.btn-danger:active:hover,.btn-danger.active:hover,.open>.dropdown-toggle.btn-danger:hover,.btn-danger:active:focus,.btn-danger.active:focus,.open>.dropdown-toggle.btn-danger:focus,.btn-danger:active.focus,.btn-danger.active.focus,.open>.dropdown-toggle.btn-danger.focus{color:#ffffff;background-color:#a80026;border-color:#80001c}.btn-danger:active,.btn-danger.active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled.focus,.btn-danger[disabled].focus,fieldset[disabled] .btn-danger.focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#ff0039;border-color:#ff0039}.btn-danger .badge{color:#ff0039;background-color:#ffffff}.btn-link{color:#2780e3;font-weight:normal;border-radius:0}.btn-link,.btn-link:active,.btn-link.active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#165ba8;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] 
.btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#999999;text-decoration:none}.btn-lg,.btn-group-lg>.btn{padding:18px 30px;font-size:19px;line-height:1.3333333;border-radius:0}.btn-sm,.btn-group-sm>.btn{padding:5px 10px;font-size:13px;line-height:1.5;border-radius:0}.btn-xs,.btn-group-xs>.btn{padding:1px 5px;font-size:13px;line-height:1.5;border-radius:0}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity 0.15s linear;-o-transition:opacity 0.15s linear;transition:opacity 0.15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-property:height, visibility;-o-transition-property:height, visibility;transition-property:height, visibility;-webkit-transition-duration:0.35s;-o-transition-duration:0.35s;transition-duration:0.35s;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid \9;border-right:4px solid transparent;border-left:4px solid transparent}.dropup,.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:15px;text-align:left;background-color:#ffffff;border:1px solid #cccccc;border:1px solid rgba(0,0,0,0.15);border-radius:0;-webkit-box-shadow:0 6px 12px rgba(0,0,0,0.175);box-shadow:0 6px 12px rgba(0,0,0,0.175);-webkit-background-clip:padding-box;background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu 
.divider{height:1px;margin:9.5px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:normal;line-height:1.42857143;color:#333333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{text-decoration:none;color:#ffffff;background-color:#2780e3}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#ffffff;text-decoration:none;outline:0;background-color:#2780e3}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 20px;font-size:13px;line-height:1.42857143;color:#999999;white-space:nowrap}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px dashed;border-bottom:4px solid \9;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu-left{left:0;right:auto}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group 
.btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-left:8px;padding-right:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-left:12px;padding-right:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 
5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-bottom-left-radius:0;border-top-right-radius:0;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{float:none;display:table-cell;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle="buttons"]>.btn input[type="radio"],[data-toggle="buttons"]>.btn-group>.btn input[type="radio"],[data-toggle="buttons"]>.btn input[type="checkbox"],[data-toggle="buttons"]>.btn-group>.btn input[type="checkbox"]{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*="col-"]{float:none;padding-left:0;padding-right:0}.input-group 
.form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:64px;padding:18px 30px;font-size:19px;line-height:1.3333333;border-radius:0}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:64px;line-height:64px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn,select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:31px;padding:5px 10px;font-size:13px;line-height:1.5;border-radius:0}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:31px;line-height:31px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn,select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group .form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:10px 18px;font-size:15px;font-weight:normal;line-height:1;color:#333333;text-align:center;background-color:#e6e6e6;border:1px solid #cccccc;border-radius:0}.input-group-addon.input-sm{padding:5px 10px;font-size:13px;border-radius:0}.input-group-addon.input-lg{padding:18px 
30px;font-size:19px;border-radius:0}.input-group-addon input[type="radio"],.input-group-addon input[type="checkbox"]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group-btn:last-child>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-top-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:first-child>.btn-group:not(:first-child)>.btn{border-bottom-left-radius:0;border-top-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:hover,.input-group-btn>.btn:focus,.input-group-btn>.btn:active{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{margin-bottom:0;padding-left:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#e6e6e6}.nav>li.disabled>a{color:#999999}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#999999;text-decoration:none;background-color:transparent;cursor:not-allowed}.nav .open>a,.nav .open>a:hover,.nav .open>a:focus{background-color:#e6e6e6;border-color:#2780e3}.nav .nav-divider{height:1px;margin:9.5px 
0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #dddddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:0 0 0 0}.nav-tabs>li>a:hover{border-color:#e6e6e6 #e6e6e6 #dddddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555555;background-color:#ffffff;border:1px solid #dddddd;border-bottom-color:transparent;cursor:default}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{text-align:center;margin-bottom:5px}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border:1px solid #dddddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #dddddd;border-radius:0 0 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border-bottom-color:#ffffff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:0}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#ffffff;background-color:#2780e3}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{text-align:center;margin-bottom:5px}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media 
(min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border:1px solid #dddddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #dddddd;border-radius:0 0 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border-bottom-color:#ffffff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:21px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:0}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{overflow-x:visible;padding-right:15px;padding-left:15px;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1);-webkit-overflow-scrolling:touch}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block !important;height:auto !important;padding-bottom:0;overflow:visible !important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{padding-left:0;padding-right:0}}.navbar-fixed-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{max-height:200px}}.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media 
(min-width:768px){.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;padding:14.5px 15px;font-size:19px;line-height:21px;height:50px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;margin-right:15px;padding:9px 10px;margin-top:8px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:0}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.25px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:21px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:21px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media 
(min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:14.5px;padding-bottom:14.5px}}.navbar-form{margin-left:-15px;margin-right:-15px;padding:10px 15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);margin-top:3.5px;margin-bottom:3.5px}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn,.navbar-form .input-group .form-control{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .radio label,.navbar-form .checkbox label{padding-left:0}.navbar-form .radio input[type="radio"],.navbar-form .checkbox input[type="checkbox"]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;border:0;margin-left:0;margin-right:0;padding-top:0;padding-bottom:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-right-radius:0;border-top-left-radius:0}.navbar-fixed-bottom 
.navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-right-radius:0;border-top-left-radius:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:3.5px;margin-bottom:3.5px}.navbar-btn.btn-sm{margin-top:9.5px;margin-bottom:9.5px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:14.5px;margin-bottom:14.5px}@media (min-width:768px){.navbar-text{float:left;margin-left:15px;margin-right:15px}}@media (min-width:768px){.navbar-left{float:left !important}.navbar-right{float:right !important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#222222;border-color:#121212}.navbar-default .navbar-brand{color:#ffffff}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#ffffff;background-color:none}.navbar-default .navbar-text{color:#ffffff}.navbar-default .navbar-nav>li>a{color:#ffffff}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#ffffff;background-color:#090909}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#ffffff;background-color:#090909}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#cccccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:transparent}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#090909}.navbar-default .navbar-toggle .icon-bar{background-color:#ffffff}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#121212}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{background-color:#090909;color:#ffffff}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#ffffff}.navbar-default .navbar-nav .open 
.dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#ffffff;background-color:#090909}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#ffffff;background-color:#090909}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#cccccc;background-color:transparent}}.navbar-default .navbar-link{color:#ffffff}.navbar-default .navbar-link:hover{color:#ffffff}.navbar-default .btn-link{color:#ffffff}.navbar-default .btn-link:hover,.navbar-default .btn-link:focus{color:#ffffff}.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:hover,.navbar-default .btn-link[disabled]:focus,fieldset[disabled] .navbar-default .btn-link:focus{color:#cccccc}.navbar-inverse{background-color:#2780e3;border-color:#1967be}.navbar-inverse .navbar-brand{color:#ffffff}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#ffffff;background-color:none}.navbar-inverse .navbar-text{color:#ffffff}.navbar-inverse .navbar-nav>li>a{color:#ffffff}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#ffffff;background-color:#1967be}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#ffffff;background-color:#1967be}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#ffffff;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:transparent}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#1967be}.navbar-inverse .navbar-toggle .icon-bar{background-color:#ffffff}.navbar-inverse 
.navbar-collapse,.navbar-inverse .navbar-form{border-color:#1a6ecc}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{background-color:#1967be;color:#ffffff}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#1967be}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#1967be}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#ffffff}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#ffffff;background-color:#1967be}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#ffffff;background-color:#1967be}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ffffff;background-color:transparent}}.navbar-inverse .navbar-link{color:#ffffff}.navbar-inverse .navbar-link:hover{color:#ffffff}.navbar-inverse .btn-link{color:#ffffff}.navbar-inverse .btn-link:hover,.navbar-inverse .btn-link:focus{color:#ffffff}.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:hover,.navbar-inverse .btn-link[disabled]:focus,fieldset[disabled] .navbar-inverse .btn-link:focus{color:#ffffff}.breadcrumb{padding:8px 15px;margin-bottom:21px;list-style:none;background-color:#f5f5f5;border-radius:0}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{content:"/\00a0";padding:0 5px;color:#cccccc}.breadcrumb>.active{color:#999999}.pagination{display:inline-block;padding-left:0;margin:21px 0;border-radius:0}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:10px 
18px;line-height:1.42857143;text-decoration:none;color:#2780e3;background-color:#ffffff;border:1px solid #dddddd;margin-left:-1px}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-bottom-left-radius:0;border-top-left-radius:0}.pagination>li:last-child>a,.pagination>li:last-child>span{border-bottom-right-radius:0;border-top-right-radius:0}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{z-index:3;color:#165ba8;background-color:#e6e6e6;border-color:#dddddd}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#999999;background-color:#f5f5f5;border-color:#dddddd;cursor:default}.pagination>.disabled>span,.pagination>.disabled>span:hover,.pagination>.disabled>span:focus,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#999999;background-color:#ffffff;border-color:#dddddd;cursor:not-allowed}.pagination-lg>li>a,.pagination-lg>li>span{padding:18px 30px;font-size:19px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-bottom-left-radius:0;border-top-left-radius:0}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-bottom-right-radius:0;border-top-right-radius:0}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:13px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-bottom-left-radius:0;border-top-left-radius:0}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-bottom-right-radius:0;border-top-right-radius:0}.pager{padding-left:0;margin:21px 0;list-style:none;text-align:center}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#ffffff;border:1px solid #dddddd;border-radius:0}.pager li>a:hover,.pager 
li>a:focus{text-decoration:none;background-color:#e6e6e6}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999999;background-color:#ffffff;cursor:not-allowed}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:bold;line-height:1;color:#ffffff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:hover,a.label:focus{color:#ffffff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#222222}.label-default[href]:hover,.label-default[href]:focus{background-color:#090909}.label-primary{background-color:#2780e3}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#1967be}.label-success{background-color:#3fb618}.label-success[href]:hover,.label-success[href]:focus{background-color:#2f8912}.label-info{background-color:#9954bb}.label-info[href]:hover,.label-info[href]:focus{background-color:#7e3f9d}.label-warning{background-color:#ff7518}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#e45c00}.label-danger{background-color:#ff0039}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#cc002e}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:13px;font-weight:bold;color:#ffffff;line-height:1;vertical-align:middle;white-space:nowrap;text-align:center;background-color:#2780e3;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge,.btn-group-xs>.btn .badge{top:0;padding:1px 
5px}a.badge:hover,a.badge:focus{color:#ffffff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#2780e3;background-color:#ffffff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#e6e6e6}.jumbotron h1,.jumbotron .h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:23px;font-weight:200}.jumbotron>hr{border-top-color:#cccccc}.container .jumbotron,.container-fluid .jumbotron{border-radius:0}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-left:60px;padding-right:60px}.jumbotron h1,.jumbotron .h1{font-size:68px}}.thumbnail{display:block;padding:4px;margin-bottom:21px;line-height:1.42857143;background-color:#ffffff;border:1px solid #dddddd;border-radius:0;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail>img,.thumbnail a>img{margin-left:auto;margin-right:auto}a.thumbnail:hover,a.thumbnail:focus,a.thumbnail.active{border-color:#2780e3}.thumbnail .caption{padding:9px;color:#333333}.alert{padding:15px;margin-bottom:21px;border:1px solid transparent;border-radius:0}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:bold}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{background-color:#3fb618;border-color:#4e9f15;color:#ffffff}.alert-success hr{border-top-color:#438912}.alert-success .alert-link{color:#e6e6e6}.alert-info{background-color:#9954bb;border-color:#7643a8;color:#ffffff}.alert-info hr{border-top-color:#693c96}.alert-info 
.alert-link{color:#e6e6e6}.alert-warning{background-color:#ff7518;border-color:#ff4309;color:#ffffff}.alert-warning hr{border-top-color:#ee3800}.alert-warning .alert-link{color:#e6e6e6}.alert-danger{background-color:#ff0039;border-color:#f0005e;color:#ffffff}.alert-danger hr{border-top-color:#d60054}.alert-danger .alert-link{color:#e6e6e6}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{overflow:hidden;height:21px;margin-bottom:21px;background-color:#cccccc;border-radius:0;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress-bar{float:left;width:0%;height:100%;font-size:13px;line-height:21px;color:#ffffff;text-align:center;background-color:#2780e3;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-transition:width 0.6s ease;-o-transition:width 0.6s ease;transition:width 0.6s ease}.progress-striped .progress-bar,.progress-bar-striped{background-image:-webkit-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:-o-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress.active .progress-bar,.progress-bar.active{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear 
infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#3fb618}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:-o-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent)}.progress-bar-info{background-color:#9954bb}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:-o-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent)}.progress-bar-warning{background-color:#ff7518}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:-o-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, 
rgba(255,255,255,0.15) 75%, transparent 75%, transparent)}.progress-bar-danger{background-color:#ff0039}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:-o-linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{zoom:1;overflow:hidden}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-left,.media-right,.media-body{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{margin-bottom:20px;padding-left:0}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#ffffff;border:1px solid #dddddd}.list-group-item:first-child{border-top-right-radius:0;border-top-left-radius:0}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}a.list-group-item,button.list-group-item{color:#555555}a.list-group-item .list-group-item-heading,button.list-group-item 
.list-group-item-heading{color:#333333}a.list-group-item:hover,button.list-group-item:hover,a.list-group-item:focus,button.list-group-item:focus{text-decoration:none;color:#555555;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item.disabled,.list-group-item.disabled:hover,.list-group-item.disabled:focus{background-color:#e6e6e6;color:#999999;cursor:not-allowed}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text{color:#999999}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{z-index:2;color:#ffffff;background-color:#2780e3;border-color:#dddddd}.list-group-item.active .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>.small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:hover .list-group-item-text,.list-group-item.active:focus .list-group-item-text{color:#dceafa}.list-group-item-success{color:#ffffff;background-color:#3fb618}a.list-group-item-success,button.list-group-item-success{color:#ffffff}a.list-group-item-success .list-group-item-heading,button.list-group-item-success 
.list-group-item-heading{color:inherit}a.list-group-item-success:hover,button.list-group-item-success:hover,a.list-group-item-success:focus,button.list-group-item-success:focus{color:#ffffff;background-color:#379f15}a.list-group-item-success.active,button.list-group-item-success.active,a.list-group-item-success.active:hover,button.list-group-item-success.active:hover,a.list-group-item-success.active:focus,button.list-group-item-success.active:focus{color:#fff;background-color:#ffffff;border-color:#ffffff}.list-group-item-info{color:#ffffff;background-color:#9954bb}a.list-group-item-info,button.list-group-item-info{color:#ffffff}a.list-group-item-info .list-group-item-heading,button.list-group-item-info .list-group-item-heading{color:inherit}a.list-group-item-info:hover,button.list-group-item-info:hover,a.list-group-item-info:focus,button.list-group-item-info:focus{color:#ffffff;background-color:#8d46b0}a.list-group-item-info.active,button.list-group-item-info.active,a.list-group-item-info.active:hover,button.list-group-item-info.active:hover,a.list-group-item-info.active:focus,button.list-group-item-info.active:focus{color:#fff;background-color:#ffffff;border-color:#ffffff}.list-group-item-warning{color:#ffffff;background-color:#ff7518}a.list-group-item-warning,button.list-group-item-warning{color:#ffffff}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning 
.list-group-item-heading{color:inherit}a.list-group-item-warning:hover,button.list-group-item-warning:hover,a.list-group-item-warning:focus,button.list-group-item-warning:focus{color:#ffffff;background-color:#fe6600}a.list-group-item-warning.active,button.list-group-item-warning.active,a.list-group-item-warning.active:hover,button.list-group-item-warning.active:hover,a.list-group-item-warning.active:focus,button.list-group-item-warning.active:focus{color:#fff;background-color:#ffffff;border-color:#ffffff}.list-group-item-danger{color:#ffffff;background-color:#ff0039}a.list-group-item-danger,button.list-group-item-danger{color:#ffffff}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:hover,button.list-group-item-danger:hover,a.list-group-item-danger:focus,button.list-group-item-danger:focus{color:#ffffff;background-color:#e60033}a.list-group-item-danger.active,button.list-group-item-danger.active,a.list-group-item-danger.active:hover,button.list-group-item-danger.active:hover,a.list-group-item-danger.active:focus,button.list-group-item-danger.active:focus{color:#fff;background-color:#ffffff;border-color:#ffffff}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}/* review: negative border-radius values (-1, from Bootstrap's "radius - 1" calc with a 0 base radius) are invalid CSS and get dropped by browsers; normalized to 0 throughout the panel rules below */.panel{margin-bottom:21px;background-color:#ffffff;border:1px solid transparent;border-radius:0;-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.05);box-shadow:0 1px 1px rgba(0,0,0,0.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-right-radius:0;border-top-left-radius:0}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:17px;color:inherit}.panel-title>a,.panel-title>small,.panel-title>.small,.panel-title>small>a,.panel-title>.small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid 
#dddddd;border-bottom-right-radius:0;border-bottom-left-radius:0}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-right-radius:0;border-top-left-radius:0}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-right-radius:0;border-top-left-radius:0}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.table,.panel>.table-responsive>.table,.panel>.panel-collapse>.table{margin-bottom:0}.panel>.table caption,.panel>.table-responsive>.table caption,.panel>.panel-collapse>.table caption{padding-left:15px;padding-right:15px}.panel>.table:first-child,.panel>.table-responsive:first-child>.table:first-child{border-top-right-radius:0;border-top-left-radius:0}.panel>.table:first-child>thead:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child 
td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child{border-top-left-radius:0}.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child{border-top-right-radius:0}.panel>.table:last-child,.panel>.table-responsive:last-child>.table:last-child{border-bottom-right-radius:0;border-bottom-left-radius:0}.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-left-radius:0;border-bottom-right-radius:0}.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child 
td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:0}.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:0}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #dddddd}.panel>.table>tbody:first-child>tr:first-child th,.panel>.table>tbody:first-child>tr:first-child 
td{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered
>tbody>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{border:0;margin-bottom:0}.panel-group{margin-bottom:21px}.panel-group .panel{margin-bottom:0;border-radius:0}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.panel-body,.panel-group .panel-heading+.panel-collapse>.list-group{border-top:1px solid #dddddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #dddddd}.panel-default{border-color:#dddddd}.panel-default>.panel-heading{color:#333333;background-color:#f5f5f5;border-color:#dddddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#dddddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#dddddd}.panel-primary{border-color:#2780e3}.panel-primary>.panel-heading{color:#ffffff;background-color:#2780e3;border-color:#2780e3}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#2780e3}.panel-primary>.panel-heading .badge{color:#2780e3;background-color:#ffffff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#2780e3}.panel-success{border-color:#4e9f15}.panel-success>.panel-heading{color:#ffffff;background-color:#3fb618;border-color:#4e9f15}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#4e9f15}.panel-success>.panel-heading 
.badge{color:#3fb618;background-color:#ffffff}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#4e9f15}.panel-info{border-color:#7643a8}.panel-info>.panel-heading{color:#ffffff;background-color:#9954bb;border-color:#7643a8}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#7643a8}.panel-info>.panel-heading .badge{color:#9954bb;background-color:#ffffff}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#7643a8}.panel-warning{border-color:#ff4309}.panel-warning>.panel-heading{color:#ffffff;background-color:#ff7518;border-color:#ff4309}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ff4309}.panel-warning>.panel-heading .badge{color:#ff7518;background-color:#ffffff}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ff4309}.panel-danger{border-color:#f0005e}.panel-danger>.panel-heading{color:#ffffff;background-color:#ff0039;border-color:#f0005e}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#f0005e}.panel-danger>.panel-heading .badge{color:#ff0039;background-color:#ffffff}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#f0005e}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object,.embed-responsive video{position:absolute;top:0;left:0;bottom:0;height:100%;width:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well 
blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-lg{padding:24px;border-radius:0}.well-sm{padding:9px;border-radius:0}.close{float:right;font-size:22.5px;font-weight:bold;line-height:1;color:#ffffff;text-shadow:0 1px 0 #ffffff;opacity:0.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#ffffff;text-decoration:none;cursor:pointer;opacity:0.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal{display:none;overflow:hidden;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translate(0, -25%);-ms-transform:translate(0, -25%);-o-transform:translate(0, -25%);transform:translate(0, -25%);-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0, 0);-ms-transform:translate(0, 0);-o-transform:translate(0, 0);transform:translate(0, 0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#ffffff;border:1px solid #999999;border:1px solid transparent;border-radius:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,0.5);box-shadow:0 3px 9px rgba(0,0,0,0.5);-webkit-background-clip:padding-box;background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:0.5;filter:alpha(opacity=50)}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5;min-height:16.42857143px}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:20px}.modal-footer{padding:20px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer 
.btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,0.5);box-shadow:0 5px 15px rgba(0,0,0,0.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Source Sans Pro",Calibri,Candara,Arial,sans-serif;font-style:normal;font-weight:normal;letter-spacing:normal;line-break:auto;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;word-wrap:normal;font-size:13px;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:0.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#ffffff;text-align:center;background-color:#000000;border-radius:0}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000000}.tooltip.top-left .tooltip-arrow{bottom:0;right:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000000}.tooltip.bottom 
.tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Source Sans Pro",Calibri,Candara,Arial,sans-serif;font-style:normal;font-weight:normal;letter-spacing:normal;line-break:auto;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;word-wrap:normal;font-size:15px;background-color:#ffffff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #cccccc;border:1px solid rgba(0,0,0,0.2);border-radius:0;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}/* review: "-1 -1 0 0" is an invalid border-radius shorthand (negative + unitless), so browsers dropped the whole declaration; normalized to 0 to match the theme's square-corner intent */.popover-title{margin:0;padding:8px 14px;font-size:15px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:0 0 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{border-width:10px;content:""}.popover.top>.arrow{left:50%;margin-left:-11px;border-bottom-width:0;border-top-color:#999999;border-top-color:rgba(0,0,0,0.25);bottom:-11px}.popover.top>.arrow:after{content:" ";bottom:1px;margin-left:-10px;border-bottom-width:0;border-top-color:#ffffff}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-left-width:0;border-right-color:#999999;border-right-color:rgba(0,0,0,0.25)}.popover.right>.arrow:after{content:" 
";left:1px;bottom:-10px;border-left-width:0;border-right-color:#ffffff}.popover.bottom>.arrow{left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999999;border-bottom-color:rgba(0,0,0,0.25);top:-11px}.popover.bottom>.arrow:after{content:" ";top:1px;margin-left:-10px;border-top-width:0;border-bottom-color:#ffffff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999999;border-left-color:rgba(0,0,0,0.25)}.popover.left>.arrow:after{content:" ";right:1px;border-right-width:0;border-left-color:#ffffff;bottom:-10px}.carousel{position:relative}.carousel-inner{position:relative;overflow:hidden;width:100%}.carousel-inner>.item{display:none;position:relative;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.next,.carousel-inner>.item.active.right{-webkit-transform:translate3d(100%, 0, 0);transform:translate3d(100%, 0, 0);left:0}.carousel-inner>.item.prev,.carousel-inner>.item.active.left{-webkit-transform:translate3d(-100%, 0, 0);transform:translate3d(-100%, 0, 0);left:0}.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right,.carousel-inner>.item.active{-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 
0);left:0}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;left:0;bottom:0;width:15%;opacity:0.5;filter:alpha(opacity=50);font-size:20px;color:#ffffff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6)}.carousel-control.left{background-image:-webkit-linear-gradient(left, rgba(0,0,0,0.5) 0, rgba(0,0,0,0.0001) 100%);background-image:-o-linear-gradient(left, rgba(0,0,0,0.5) 0, rgba(0,0,0,0.0001) 100%);background-image:-webkit-gradient(linear, left top, right top, from(rgba(0,0,0,0.5)), to(rgba(0,0,0,0.0001)));background-image:linear-gradient(to right, rgba(0,0,0,0.5) 0, rgba(0,0,0,0.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1)}.carousel-control.right{left:auto;right:0;background-image:-webkit-linear-gradient(left, rgba(0,0,0,0.0001) 0, rgba(0,0,0,0.5) 100%);background-image:-o-linear-gradient(left, rgba(0,0,0,0.0001) 0, rgba(0,0,0,0.5) 100%);background-image:-webkit-gradient(linear, left top, right top, from(rgba(0,0,0,0.0001)), to(rgba(0,0,0,0.5)));background-image:linear-gradient(to right, rgba(0,0,0,0.0001) 0, rgba(0,0,0,0.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1)}.carousel-control:hover,.carousel-control:focus{outline:0;color:#ffffff;text-decoration:none;opacity:0.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control 
.glyphicon-chevron-right{position:absolute;top:50%;margin-top:-10px;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .glyphicon-chevron-left{left:50%;margin-left:-10px}.carousel-control .icon-next,.carousel-control .glyphicon-chevron-right{right:50%;margin-right:-10px}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;line-height:1;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;margin-left:-30%;padding-left:0;list-style:none;text-align:center}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;border:1px solid #ffffff;border-radius:10px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0)}.carousel-indicators .active{margin:0;width:12px;height:12px;background-color:#ffffff}.carousel-caption{position:absolute;left:15%;right:15%;bottom:20px;z-index:10;padding-top:20px;padding-bottom:20px;color:#ffffff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,0.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{left:20%;right:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after,.dl-horizontal dd:before,.dl-horizontal dd:after,.container:before,.container:after,.container-fluid:before,.container-fluid:after,.row:before,.row:after,.form-horizontal .form-group:before,.form-horizontal 
.form-group:after,.btn-toolbar:before,.btn-toolbar:after,.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after,.nav:before,.nav:after,.navbar:before,.navbar:after,.navbar-header:before,.navbar-header:after,.navbar-collapse:before,.navbar-collapse:after,.pager:before,.pager:after,.panel-body:before,.panel-body:after,.modal-footer:before,.modal-footer:after{content:" ";display:table}.clearfix:after,.dl-horizontal dd:after,.container:after,.container-fluid:after,.row:after,.form-horizontal .form-group:after,.btn-toolbar:after,.btn-group-vertical>.btn-group:after,.nav:after,.navbar:after,.navbar-header:after,.navbar-collapse:after,.pager:after,.panel-body:after,.modal-footer:after{clear:both}.center-block{display:block;margin-left:auto;margin-right:auto}.pull-right{float:right !important}.pull-left{float:left !important}.hide{display:none !important}.show{display:block !important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none !important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-xs,.visible-sm,.visible-md,.visible-lg{display:none !important}.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block{display:none !important}@media (max-width:767px){.visible-xs{display:block !important}table.visible-xs{display:table !important}tr.visible-xs{display:table-row !important}th.visible-xs,td.visible-xs{display:table-cell !important}}@media (max-width:767px){.visible-xs-block{display:block !important}}@media (max-width:767px){.visible-xs-inline{display:inline !important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block !important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block 
!important}table.visible-sm{display:table !important}tr.visible-sm{display:table-row !important}th.visible-sm,td.visible-sm{display:table-cell !important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block !important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline !important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block !important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block !important}table.visible-md{display:table !important}tr.visible-md{display:table-row !important}th.visible-md,td.visible-md{display:table-cell !important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block !important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline !important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block !important}}@media (min-width:1200px){.visible-lg{display:block !important}table.visible-lg{display:table !important}tr.visible-lg{display:table-row !important}th.visible-lg,td.visible-lg{display:table-cell !important}}@media (min-width:1200px){.visible-lg-block{display:block !important}}@media (min-width:1200px){.visible-lg-inline{display:inline !important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block !important}}@media (max-width:767px){.hidden-xs{display:none !important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none !important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none !important}}@media (min-width:1200px){.hidden-lg{display:none !important}}.visible-print{display:none !important}@media print{.visible-print{display:block !important}table.visible-print{display:table !important}tr.visible-print{display:table-row !important}th.visible-print,td.visible-print{display:table-cell !important}}.visible-print-block{display:none !important}@media 
print{.visible-print-block{display:block !important}}.visible-print-inline{display:none !important}@media print{.visible-print-inline{display:inline !important}}.visible-print-inline-block{display:none !important}@media print{.visible-print-inline-block{display:inline-block !important}}@media print{.hidden-print{display:none !important}}.navbar-inverse .badge{background-color:#fff;color:#2780e3}body{-webkit-font-smoothing:antialiased}.text-primary,.text-primary:hover{color:#2780e3}.text-success,.text-success:hover{color:#3fb618}.text-danger,.text-danger:hover{color:#ff0039}.text-warning,.text-warning:hover{color:#ff7518}.text-info,.text-info:hover{color:#9954bb}table a:not(.btn),.table a:not(.btn){text-decoration:underline}table .dropdown-menu a,.table .dropdown-menu a{text-decoration:none}table .success,.table .success,table .warning,.table .warning,table .danger,.table .danger,table .info,.table .info{color:#fff}table .success a,.table .success a,table .warning a,.table .warning a,table .danger a,.table .danger a,table .info a,.table .info a{color:#fff}.has-warning .help-block,.has-warning .control-label,.has-warning .radio,.has-warning .checkbox,.has-warning .radio-inline,.has-warning .checkbox-inline,.has-warning.radio label,.has-warning.checkbox label,.has-warning.radio-inline label,.has-warning.checkbox-inline label,.has-warning .form-control-feedback{color:#ff7518}.has-warning .form-control,.has-warning .form-control:focus,.has-warning .input-group-addon{border:1px solid #ff7518}.has-error .help-block,.has-error .control-label,.has-error .radio,.has-error .checkbox,.has-error .radio-inline,.has-error .checkbox-inline,.has-error.radio label,.has-error.checkbox label,.has-error.radio-inline label,.has-error.checkbox-inline label,.has-error .form-control-feedback{color:#ff0039}.has-error .form-control,.has-error .form-control:focus,.has-error .input-group-addon{border:1px solid #ff0039}.has-success .help-block,.has-success .control-label,.has-success 
.radio,.has-success .checkbox,.has-success .radio-inline,.has-success .checkbox-inline,.has-success.radio label,.has-success.checkbox label,.has-success.radio-inline label,.has-success.checkbox-inline label,.has-success .form-control-feedback{color:#3fb618}.has-success .form-control,.has-success .form-control:focus,.has-success .input-group-addon{border:1px solid #3fb618}.nav-pills>li>a{border-radius:0}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{background-image:none}.close{text-decoration:none;text-shadow:none;opacity:0.4}.close:hover,.close:focus{opacity:1}.alert{border:none}.alert .alert-link{text-decoration:underline;color:#fff}.label{border-radius:0}.progress{height:8px;-webkit-box-shadow:none;box-shadow:none}.progress .progress-bar{font-size:8px;line-height:8px}.panel-heading,.panel-footer{border-top-right-radius:0;border-top-left-radius:0}.panel-default .close{color:#333333}a.list-group-item-success.active{background-color:#3fb618}a.list-group-item-success.active:hover,a.list-group-item-success.active:focus{background-color:#379f15}a.list-group-item-warning.active{background-color:#ff7518}a.list-group-item-warning.active:hover,a.list-group-item-warning.active:focus{background-color:#fe6600}a.list-group-item-danger.active{background-color:#ff0039}a.list-group-item-danger.active:hover,a.list-group-item-danger.active:focus{background-color:#e60033}.modal .close{color:#333333}.popover{color:#333333} diff --git a/gluecat.py b/gluecat.py deleted file mode 100644 index 4c67df2..0000000 --- a/gluecat.py +++ /dev/null @@ -1,466 +0,0 @@ -from pathlib import Path -import argparse -import random -import numpy as np -import matplotlib.cm as cm -import torch -import torch.nn as nn -from torch.autograd import Variable -import os -import torch.multiprocessing -from tqdm import tqdm - -import cv2 -from scipy.spatial.distance import cdist - -from models.utils import (compute_pose_error, compute_epipolar_error, - estimate_pose, make_matching_plot, - error_colormap, 
AverageTimer, pose_auc, read_image, - rotate_intrinsics, rotate_pose_inplane, - scale_intrinsics, read_image_modified, frame2tensor) - -from models.matching import Matching -from models.matchingsuperglue import Matching_ori -from sjlee.loss import loss_superglue - -torch.set_grad_enabled(True) -torch.multiprocessing.set_sharing_strategy('file_system') - -parser = argparse.ArgumentParser( - description='Image pair matching and pose evaluation with SuperGlue', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - -parser.add_argument( - '--viz', action='store_true', - help='Visualize the matches and dump the plots') -parser.add_argument( - '--eval', action='store_true', - help='Perform the evaluation' - ' (requires ground truth pose and intrinsics)') - -parser.add_argument( - '--superglue', choices={'indoor', 'outdoor'}, default='indoor', - help='SuperGlue weights') -parser.add_argument( - '--max_keypoints', type=int, default=1023, - help='Maximum number of keypoints detected by Superpoint' - ' (\'-1\' keeps all keypoints)') -parser.add_argument( - '--keypoint_threshold', type=float, default=0.005, - help='SuperPoint keypoint detector confidence threshold') -parser.add_argument( - '--nms_radius', type=int, default=4, - help='SuperPoint Non Maximum Suppression (NMS) radius' - ' (Must be positive)') -parser.add_argument( - '--sinkhorn_iterations', type=int, default=20, - help='Number of Sinkhorn iterations performed by SuperGlue') -parser.add_argument( - '--match_threshold', type=float, default=0.2, - help='SuperGlue match threshold') - -parser.add_argument( - '--resize', type=int, nargs='+', default=[640, 480], - help='Resize the input image before running inference. 
If two numbers, ' - 'resize to the exact dimensions, if one number, resize the max ' - 'dimension, if -1, do not resize') -parser.add_argument( - '--resize_float', action='store_true', - help='Resize the image after casting uint8 to float') - -parser.add_argument( - '--cache', action='store_true', - help='Skip the pair if output .npz files are already found') -parser.add_argument( - '--show_keypoints', action='store_true', - help='Plot the keypoints in addition to the matches') -parser.add_argument( - '--fast_viz', action='store_true', - help='Use faster image visualization based on OpenCV instead of Matplotlib') -parser.add_argument( - '--viz_extension', type=str, default='png', choices=['png', 'pdf'], - help='Visualization file extension. Use pdf for highest-quality.') - -parser.add_argument( - '--opencv_display', action='store_true', - help='Visualize via OpenCV before saving output images') -parser.add_argument( - '--eval_pairs_list', type=str, default='assets/scannet_sample_pairs_with_gt.txt', - help='Path to the list of image pairs for evaluation') -parser.add_argument( - '--shuffle', action='store_true', - help='Shuffle ordering of pairs before processing') -parser.add_argument( - '--max_length', type=int, default=-1, - help='Maximum number of pairs to evaluate') - -parser.add_argument( - '--eval_input_dir', type=str, default='assets/scannet_sample_images/', - help='Path to the directory that contains the images') -parser.add_argument( - '--eval_output_dir', type=str, default='test_matches', - help='Path to the directory in which the .npz results and optional,' - 'visualizations are written') -parser.add_argument( - '--learning_rate', type=float, default=0.0001, #0.0001 - help='Learning rate') - -parser.add_argument( - '--batch_size', type=int, default=1, - help='batch_size') -parser.add_argument( - '--train_path', type=str, default='/home/cvlab09/projects/seungjun_an/dataset/train2014/', - help='Path to the directory of training imgs.') 
-parser.add_argument( - '--epoch', type=int, default=1, - help='Number of epoches') - - - - -if __name__ == '__main__': - opt = parser.parse_args() - print(opt) - - # make sure the flags are properly used - assert not (opt.opencv_display and not opt.viz), 'Must use --viz with --opencv_display' - assert not (opt.opencv_display and not opt.fast_viz), 'Cannot use --opencv_display without --fast_viz' - assert not (opt.fast_viz and not opt.viz), 'Must use --viz with --fast_viz' - assert not (opt.fast_viz and opt.viz_extension == 'pdf'), 'Cannot use pdf extension with --fast_viz' - - numOftrainSet = 20 - - # store viz results - eval_output_dir = Path(opt.eval_output_dir) - eval_output_dir.mkdir(exist_ok=True, parents=True) - print('Will write visualization images to', - 'directory \"{}\"'.format(eval_output_dir)) - config = { - 'superpoint': { - 'nms_radius': opt.nms_radius, - 'keypoint_threshold': opt.keypoint_threshold, - 'max_keypoints': opt.max_keypoints - }, - 'superglue': { - 'weights': opt.superglue, - 'sinkhorn_iterations': opt.sinkhorn_iterations, - 'match_threshold': opt.match_threshold, - } - } - matching = Matching(config).eval().to('cuda') - matching.load_state_dict(torch.load('/home/cvlab09/projects/seungjun_an/superglue_test/model_state_dict_epoch4.pth')) - - matching_ori = Matching_ori(config).eval().to('cuda') - - matching2 = Matching(config).eval().to('cuda') - sum_loss = 0. 
- - - device = 'cuda' - - for epoch in range(1, opt.epoch+1): - epoch_loss = 0 - - ##superglue.double().train() ########################################## - for i in range(numOftrainSet): - file_name =opt.train_path+str(i+1)+'.jpg' - image0, inp0, scales0 = read_image( - file_name, opt.resize, 0, opt.resize_float) - - if str(type(image0)) != '' : - continue - - width, height = image0.shape[:2] - - corners = np.array([[0, 0], [0, height], [width, 0], [width, height]], dtype=np.float32) - - warp = np.random.randint(-224, 224, size=(4, 2)).astype(np.float32) - - - - - # get the corresponding warped image - M = cv2.getPerspectiveTransform(corners, corners + warp) - warped = cv2.warpPerspective(src=image0, M=M, dsize=(image0.shape[1], image0.shape[0])) - - inp1 = frame2tensor(warped) - - #print(i) - - scores, data, pred = matching({'image0': inp0, 'image1': inp1}) - - - ################################################################################################# - - if data['skip_train'] : continue - - - key1, key2, des1, des2 = pred['keypoints0'], pred['keypoints1'], pred['descriptors0'], pred['descriptors0'] - #all match 만들자 - ################################################################### - - kp1 = key1.squeeze() - kp2 = key2.squeeze() - kp1_np = np.array(key1.cpu()).squeeze() - kp2_np = np.array(key2.cpu()).squeeze() - descs1 = des1.cpu().detach().numpy().squeeze().transpose(0, 1) - descs2 = des2.cpu().detach().numpy().squeeze().transpose(0, 1) - - - - # obtain the matching matrix of the image pair - kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :] - - if len(kp1_projected) == 1 or len(kp2_np) == 1 : continue - - dists = cdist(kp1_projected, kp2_np) - - min1 = np.argmin(dists, axis=0) - min2 = np.argmin(dists, axis=1) - - min1v = np.min(dists, axis=1) - min1f = min2[min1v < 3] - - xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0] - matches = np.intersect1d(min1f, xx) - - missing1 = 
np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches]) - missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches) - - MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]]) - MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)]) - MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]]) - all_matches = np.concatenate([MN, MN2, MN3], axis=1) - all_matches = torch.tensor(all_matches).unsqueeze(1) - - - ####################################################################### - - if data['skip_train'] == True: # image has no keypoint - continue - - #print(Loss) - - - # for every 50 images, print progress and visualize the matches - - - - ### eval ### - # Visualize the matches. - print('model->eval') ############################################################################ - #matching.eval() - image0, image1 = pred['image0'].cpu().numpy()[0]*255., pred['image1'].cpu().numpy()[0]*255. 
- - image0, image1 = image0[0], image1[0] - - kpts0, kpts1 = pred['keypoints0'].cpu().numpy()[0], pred['keypoints1'].cpu().numpy()[0] - matches, conf = data['matches0'].cpu().detach().numpy(), data['matching_scores0'].cpu().detach().numpy() - - kpts0, kpts1 = kpts0[0], kpts1[0] - - image0 = read_image_modified(image0, opt.resize, opt.resize_float) - image1 = read_image_modified(image1, opt.resize, opt.resize_float) - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - viz_path = eval_output_dir / '{}_trainedcatmatches.{}'.format(str(i), opt.viz_extension) - color = cm.jet(mconf) - stem = file_name - text = [] - - make_matching_plot( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, - text, viz_path, stem, stem, opt.show_keypoints, - opt.fast_viz, opt.opencv_display, 'Matches') - - print('################################################################')################################ - - - - - scores, data, pred = matching_ori({'image0': inp0, 'image1': inp1}) - - - ################################################################################################# - - if data['skip_train'] : continue - - - key1, key2, des1, des2 = pred['keypoints0'], pred['keypoints1'], pred['descriptors0'], pred['descriptors0'] - #all match 만들자 - ################################################################### - - kp1 = key1.squeeze() - kp2 = key2.squeeze() - kp1_np = np.array(key1.cpu()).squeeze() - kp2_np = np.array(key2.cpu()).squeeze() - descs1 = des1.cpu().detach().numpy().squeeze().transpose(0, 1) - descs2 = des2.cpu().detach().numpy().squeeze().transpose(0, 1) - - - - # obtain the matching matrix of the image pair - kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :] - - if len(kp1_projected) == 1 or len(kp2_np) == 1 : continue - - dists = cdist(kp1_projected, kp2_np) - - min1 = np.argmin(dists, axis=0) - min2 = np.argmin(dists, axis=1) - - min1v = np.min(dists, axis=1) - min1f = 
min2[min1v < 3] - - xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0] - matches = np.intersect1d(min1f, xx) - - missing1 = np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches]) - missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches) - - MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]]) - MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)]) - MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]]) - all_matches = np.concatenate([MN, MN2, MN3], axis=1) - all_matches = torch.tensor(all_matches).unsqueeze(1) - - - ####################################################################### - - if data['skip_train'] == True: # image has no keypoint - continue - - #print(Loss) - - - # for every 50 images, print progress and visualize the matches - - - - ### eval ### - # Visualize the matches. - print('model->eval') ############################################################################ - #matching.eval() - image0, image1 = pred['image0'].cpu().numpy()[0]*255., pred['image1'].cpu().numpy()[0]*255. 
- - image0, image1 = image0[0], image1[0] - - kpts0, kpts1 = pred['keypoints0'].cpu().numpy()[0], pred['keypoints1'].cpu().numpy()[0] - matches, conf = data['matches0'].cpu().detach().numpy(), data['matching_scores0'].cpu().detach().numpy() - - kpts0, kpts1 = kpts0[0], kpts1[0] - - image0 = read_image_modified(image0, opt.resize, opt.resize_float) - image1 = read_image_modified(image1, opt.resize, opt.resize_float) - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - viz_path = eval_output_dir / '{}_originmatches.{}'.format(str(i), opt.viz_extension) - color = cm.jet(mconf) - stem = file_name - text = [] - - make_matching_plot( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, - text, viz_path, stem, stem, opt.show_keypoints, - opt.fast_viz, opt.opencv_display, 'Matches') - - - - print('################################################################')################################ - - - - - scores, data, pred = matching2({'image0': inp0, 'image1': inp1}) - - - ################################################################################################# - - if data['skip_train'] : continue - - - key1, key2, des1, des2 = pred['keypoints0'], pred['keypoints1'], pred['descriptors0'], pred['descriptors0'] - #all match 만들자 - ################################################################### - - kp1 = key1.squeeze() - kp2 = key2.squeeze() - kp1_np = np.array(key1.cpu()).squeeze() - kp2_np = np.array(key2.cpu()).squeeze() - descs1 = des1.cpu().detach().numpy().squeeze().transpose(0, 1) - descs2 = des2.cpu().detach().numpy().squeeze().transpose(0, 1) - - - - # obtain the matching matrix of the image pair - kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :] - - if len(kp1_projected) == 1 or len(kp2_np) == 1 : continue - - dists = cdist(kp1_projected, kp2_np) - - min1 = np.argmin(dists, axis=0) - min2 = np.argmin(dists, axis=1) - - min1v = np.min(dists, axis=1) - min1f = min2[min1v < 
3] - - xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0] - matches = np.intersect1d(min1f, xx) - - missing1 = np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches]) - missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches) - - MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]]) - MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)]) - MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]]) - all_matches = np.concatenate([MN, MN2, MN3], axis=1) - all_matches = torch.tensor(all_matches).unsqueeze(1) - - - ####################################################################### - - if data['skip_train'] == True: # image has no keypoint - continue - - #print(Loss) - - - # for every 50 images, print progress and visualize the matches - - - - ### eval ### - # Visualize the matches. - print('model->eval') ############################################################################ - #matching.eval() - image0, image1 = pred['image0'].cpu().numpy()[0]*255., pred['image1'].cpu().numpy()[0]*255. 
- - image0, image1 = image0[0], image1[0] - - kpts0, kpts1 = pred['keypoints0'].cpu().numpy()[0], pred['keypoints1'].cpu().numpy()[0] - matches, conf = data['matches0'].cpu().detach().numpy(), data['matching_scores0'].cpu().detach().numpy() - - kpts0, kpts1 = kpts0[0], kpts1[0] - - image0 = read_image_modified(image0, opt.resize, opt.resize_float) - image1 = read_image_modified(image1, opt.resize, opt.resize_float) - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - viz_path = eval_output_dir / '{}_notraincatmatches.{}'.format(str(i), opt.viz_extension) - color = cm.jet(mconf) - stem = file_name - text = [] - - make_matching_plot( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, - text, viz_path, stem, stem, opt.show_keypoints, - opt.fast_viz, opt.opencv_display, 'Matches') - - diff --git a/fig/aggregator.png b/img/aggregator.png similarity index 100% rename from fig/aggregator.png rename to img/aggregator.png diff --git a/img/github.png b/img/github.png new file mode 100644 index 0000000..ea6ff54 Binary files /dev/null and b/img/github.png differ diff --git a/fig/overview.png b/img/overview.png similarity index 100% rename from fig/overview.png rename to img/overview.png diff --git a/img/paper_img.png b/img/paper_img.png new file mode 100644 index 0000000..edc9ccb Binary files /dev/null and b/img/paper_img.png differ diff --git a/fig/result1.png b/img/result1.png similarity index 100% rename from fig/result1.png rename to img/result1.png diff --git a/fig/result2.png b/img/result2.png similarity index 100% rename from fig/result2.png rename to img/result2.png diff --git a/index.html b/index.html new file mode 100644 index 0000000..4528f05 --- /dev/null +++ b/index.html @@ -0,0 +1,198 @@ + + + + + + + + + SuperCATs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+

+ + SuperCATs: Cost Aggregation with Transformers for Sparse Correspondence
+ + ICCE-Asia 2022 + +

+
+
+
+
    +
  • + Seungjun Lee +
  • +
  • + Seungjun An +
  • +
  • + Sunghwan Hong +
  • +
  • + Seokju Cho +
  • +
  • + Jisu Nam +
  • +
  • + Susung Hong +
  • +
  • + + Seungryong Kim + +
  • +
+ Korea University
+ +
+
+ +
+
+ +
+
+ + + +
+
+
+ + +
+
+
+

+ Comparison between SuperCATs (left) and SuperGlue (right). +

+
+
+ + +
+
+

+ Abstract +

+

+ In this work, we introduce a novel network, namely SuperCATs, which aims to find a correspondence field between visually similar images. SuperCATs stands on the shoulder of the recently proposed matching networks, SuperGlue and CATs, taking the merits of both for constructing an integrative framework. Specifically, given keypoints and corresponding descriptors, we first apply attentional aggregation consisting of self- and cross- graph neural network to obtain feature descriptors. Subsequently, we construct a cost volume using the descriptors, which then undergoes a tranformer aggregator for cost aggregation. With this approach, we manage to replace the handcrafted module based on solving an optimal transport problem initially included in SuperGlue with a transformer well known for its global receptive fields, making our approach more robust to severe deformations. We conduct experiments to demonstrate the effectiveness of the proposed method, and show that the proposed model is on par with SuperGlue for both indoor and outdoor scenes. +

+
+
+ + + +
+
+

+ Architecture +

+ +
+ +
+
+

+ Overall network architecture of SuperCATs. +

+
+ +
+ +
+
+

+ Structure of Transformer Aggregator. +

+
+
+
+ + +
+
+

+ Citation +

+
+ +
+
+
+ +
+
+

+ Acknowledgements +

+

+ Thanks to our family Podo (cat), Aru (dog) and Dubu (dog) for their support. We love you. +
+ The website template was borrowed from Michaël Gharbi. +

+
+
+
+ + diff --git a/make_load.py b/make_load.py deleted file mode 100644 index a2389fc..0000000 --- a/make_load.py +++ /dev/null @@ -1,152 +0,0 @@ -from pathlib import Path -import argparse -import random -import numpy as np -import matplotlib.cm as cm -import torch -import torch.nn as nn -from torch.autograd import Variable -import os -import torch.multiprocessing -from tqdm import tqdm - -import cv2 -from scipy.spatial.distance import cdist - -from models.utils import (compute_pose_error, compute_epipolar_error, - estimate_pose, make_matching_plot, - error_colormap, AverageTimer, pose_auc, read_image, - rotate_intrinsics, rotate_pose_inplane, - scale_intrinsics, read_image_modified, frame2tensor) - -from models.matching import Matching -from models.matchingsuperglue import Matching_ori -from sjlee.loss import loss_superglue - -torch.set_grad_enabled(True) -torch.multiprocessing.set_sharing_strategy('file_system') - -parser = argparse.ArgumentParser( - description='Image pair matching and pose evaluation with SuperGlue', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - -parser.add_argument( - '--viz', action='store_true', - help='Visualize the matches and dump the plots') -parser.add_argument( - '--eval', action='store_true', - help='Perform the evaluation' - ' (requires ground truth pose and intrinsics)') - -parser.add_argument( - '--superglue', choices={'indoor', 'outdoor'}, default='indoor', - help='SuperGlue weights') -parser.add_argument( - '--max_keypoints', type=int, default=1023, - help='Maximum number of keypoints detected by Superpoint' - ' (\'-1\' keeps all keypoints)') -parser.add_argument( - '--keypoint_threshold', type=float, default=0.005, - help='SuperPoint keypoint detector confidence threshold') -parser.add_argument( - '--nms_radius', type=int, default=4, - help='SuperPoint Non Maximum Suppression (NMS) radius' - ' (Must be positive)') -parser.add_argument( - '--sinkhorn_iterations', type=int, default=20, - help='Number of Sinkhorn 
iterations performed by SuperGlue') -parser.add_argument( - '--match_threshold', type=float, default=0.2, - help='SuperGlue match threshold') - -parser.add_argument( - '--resize', type=int, nargs='+', default=[640, 480], - help='Resize the input image before running inference. If two numbers, ' - 'resize to the exact dimensions, if one number, resize the max ' - 'dimension, if -1, do not resize') -parser.add_argument( - '--resize_float', action='store_true', - help='Resize the image after casting uint8 to float') - -parser.add_argument( - '--cache', action='store_true', - help='Skip the pair if output .npz files are already found') -parser.add_argument( - '--show_keypoints', action='store_true', - help='Plot the keypoints in addition to the matches') -parser.add_argument( - '--fast_viz', action='store_true', - help='Use faster image visualization based on OpenCV instead of Matplotlib') -parser.add_argument( - '--viz_extension', type=str, default='png', choices=['png', 'pdf'], - help='Visualization file extension. 
Use pdf for highest-quality.') - -parser.add_argument( - '--opencv_display', action='store_true', - help='Visualize via OpenCV before saving output images') -parser.add_argument( - '--eval_pairs_list', type=str, default='assets/scannet_sample_pairs_with_gt.txt', - help='Path to the list of image pairs for evaluation') -parser.add_argument( - '--shuffle', action='store_true', - help='Shuffle ordering of pairs before processing') -parser.add_argument( - '--max_length', type=int, default=-1, - help='Maximum number of pairs to evaluate') - -parser.add_argument( - '--eval_input_dir', type=str, default='assets/scannet_sample_images/', - help='Path to the directory that contains the images') -parser.add_argument( - '--eval_output_dir', type=str, default='test_matches', - help='Path to the directory in which the .npz results and optional,' - 'visualizations are written') -parser.add_argument( - '--learning_rate', type=float, default=0.0001, #0.0001 - help='Learning rate') - -parser.add_argument( - '--batch_size', type=int, default=1, - help='batch_size') -parser.add_argument( - '--train_path', type=str, default='/home/cvlab09/projects/seungjun_an/dataset/train2014/', - help='Path to the directory of training imgs.') -parser.add_argument( - '--epoch', type=int, default=1, - help='Number of epoches') - - - - -if __name__ == '__main__': - opt = parser.parse_args() - print(opt) - - # make sure the flags are properly used - assert not (opt.opencv_display and not opt.viz), 'Must use --viz with --opencv_display' - assert not (opt.opencv_display and not opt.fast_viz), 'Cannot use --opencv_display without --fast_viz' - assert not (opt.fast_viz and not opt.viz), 'Must use --viz with --fast_viz' - assert not (opt.fast_viz and opt.viz_extension == 'pdf'), 'Cannot use pdf extension with --fast_viz' - - numOftrainSet = 10 - - # store viz results - eval_output_dir = Path(opt.eval_output_dir) - eval_output_dir.mkdir(exist_ok=True, parents=True) - print('Will write visualization images 
to', - 'directory \"{}\"'.format(eval_output_dir)) - config = { - 'superpoint': { - 'nms_radius': opt.nms_radius, - 'keypoint_threshold': opt.keypoint_threshold, - 'max_keypoints': opt.max_keypoints - }, - 'superglue': { - 'weights': opt.superglue, - 'sinkhorn_iterations': opt.sinkhorn_iterations, - 'match_threshold': opt.match_threshold, - } - } - matching = Matching(config).eval().to('cuda') - matching = torch.load('/home/cvlab09/projects/seungjun_an/superglue_test/model_epoch_1.pth') - torch.save(matching.state_dict(), 'model_state_dict_epoch_1.pth') \ No newline at end of file diff --git a/models/__init__.py b/models/__init__.py deleted file mode 100755 index e69de29..0000000 diff --git a/models/__pycache__/__init__.cpython-37.pyc b/models/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 671b557..0000000 Binary files a/models/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/models/__pycache__/__init__.cpython-38.pyc b/models/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index 94e80cc..0000000 Binary files a/models/__pycache__/__init__.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/__init__.cpython-39.pyc b/models/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 3916c66..0000000 Binary files a/models/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/models/__pycache__/matching.cpython-38.pyc b/models/__pycache__/matching.cpython-38.pyc deleted file mode 100644 index ef8d31d..0000000 Binary files a/models/__pycache__/matching.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/matching.cpython-39.pyc b/models/__pycache__/matching.cpython-39.pyc deleted file mode 100644 index f7cdf40..0000000 Binary files a/models/__pycache__/matching.cpython-39.pyc and /dev/null differ diff --git a/models/__pycache__/matchingForTraining.cpython-37.pyc b/models/__pycache__/matchingForTraining.cpython-37.pyc deleted file mode 100644 index 66b217a..0000000 
Binary files a/models/__pycache__/matchingForTraining.cpython-37.pyc and /dev/null differ diff --git a/models/__pycache__/matchingForTraining.cpython-38.pyc b/models/__pycache__/matchingForTraining.cpython-38.pyc deleted file mode 100644 index c6f4350..0000000 Binary files a/models/__pycache__/matchingForTraining.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/matchingForTraining.cpython-39.pyc b/models/__pycache__/matchingForTraining.cpython-39.pyc deleted file mode 100644 index 606416d..0000000 Binary files a/models/__pycache__/matchingForTraining.cpython-39.pyc and /dev/null differ diff --git a/models/__pycache__/matching_backup.cpython-38.pyc b/models/__pycache__/matching_backup.cpython-38.pyc deleted file mode 100644 index f1eabcb..0000000 Binary files a/models/__pycache__/matching_backup.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/matchingsuperglue.cpython-38.pyc b/models/__pycache__/matchingsuperglue.cpython-38.pyc deleted file mode 100644 index feb3e4f..0000000 Binary files a/models/__pycache__/matchingsuperglue.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/superglue.cpython-37.pyc b/models/__pycache__/superglue.cpython-37.pyc deleted file mode 100644 index 1bb1420..0000000 Binary files a/models/__pycache__/superglue.cpython-37.pyc and /dev/null differ diff --git a/models/__pycache__/superglue.cpython-38.pyc b/models/__pycache__/superglue.cpython-38.pyc deleted file mode 100644 index 0a3ad9f..0000000 Binary files a/models/__pycache__/superglue.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/superglue.cpython-39.pyc b/models/__pycache__/superglue.cpython-39.pyc deleted file mode 100644 index 06813c1..0000000 Binary files a/models/__pycache__/superglue.cpython-39.pyc and /dev/null differ diff --git a/models/__pycache__/superglue2.cpython-38.pyc b/models/__pycache__/superglue2.cpython-38.pyc deleted file mode 100644 index c413c03..0000000 Binary files 
a/models/__pycache__/superglue2.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/superglue2.cpython-39.pyc b/models/__pycache__/superglue2.cpython-39.pyc deleted file mode 100644 index a32f740..0000000 Binary files a/models/__pycache__/superglue2.cpython-39.pyc and /dev/null differ diff --git a/models/__pycache__/superpoint.cpython-37.pyc b/models/__pycache__/superpoint.cpython-37.pyc deleted file mode 100644 index a9890e3..0000000 Binary files a/models/__pycache__/superpoint.cpython-37.pyc and /dev/null differ diff --git a/models/__pycache__/superpoint.cpython-38.pyc b/models/__pycache__/superpoint.cpython-38.pyc deleted file mode 100644 index fb20221..0000000 Binary files a/models/__pycache__/superpoint.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/superpoint.cpython-39.pyc b/models/__pycache__/superpoint.cpython-39.pyc deleted file mode 100644 index 8b1a999..0000000 Binary files a/models/__pycache__/superpoint.cpython-39.pyc and /dev/null differ diff --git a/models/__pycache__/utils.cpython-37.pyc b/models/__pycache__/utils.cpython-37.pyc deleted file mode 100644 index 74bd80f..0000000 Binary files a/models/__pycache__/utils.cpython-37.pyc and /dev/null differ diff --git a/models/__pycache__/utils.cpython-38.pyc b/models/__pycache__/utils.cpython-38.pyc deleted file mode 100644 index 3632a97..0000000 Binary files a/models/__pycache__/utils.cpython-38.pyc and /dev/null differ diff --git a/models/__pycache__/utils.cpython-39.pyc b/models/__pycache__/utils.cpython-39.pyc deleted file mode 100644 index b47e899..0000000 Binary files a/models/__pycache__/utils.cpython-39.pyc and /dev/null differ diff --git a/models/matching.py b/models/matching.py deleted file mode 100755 index f53f8ce..0000000 --- a/models/matching.py +++ /dev/null @@ -1,104 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. 
("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -import torch - -from .superpoint import SuperPoint -from .superglue2 import SuperGlue -from sjlee.IMC import SimpleSuperCATs - -class Matching(torch.nn.Module): - """ Image Matching Frontend (SuperPoint + SuperGlue) """ - def __init__(self, config={}): - super().__init__() - self.superpoint = SuperPoint(config.get('superpoint', {})) - self.superglue = SuperGlue(config.get('superglue', {})) - self.simsuper = SimpleSuperCATs(config.get('superglue', {})) - - def forward(self, data): - """ Run SuperPoint (optionally) and SuperGlue - SuperPoint is skipped if ['keypoints0', 'keypoints1'] exist in input - Args: - data: dictionary with minimal keys: ['image0', 'image1'] - """ - pred = {} - - # Extract SuperPoint (keypoints, scores, descriptors) if not provided - with torch.no_grad(): - if 'keypoints0' not in data: - pred0 = self.superpoint({'image': data['image0']}) - pred = {**pred, **{k+'0': v for k, v in pred0.items()}} - if 'keypoints1' not in data: - pred1 = self.superpoint({'image': data['image1']}) - pred = {**pred, **{k+'1': v for k, v in pred1.items()}} - - # Batch all features - # We should either have i) one image per batch, or - # ii) the same number of local features for all images in the batch. 
- data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - data[k].requres_grad = True - - - data['keypoints0'], data['keypoints1'] = data['keypoints0'].unsqueeze(0), data['keypoints1'].unsqueeze(0) - data['scores0'], data['scores1'] = data['scores0'].transpose(0,1), data['scores1'].transpose(0,1) - data['descriptors0'], data['descriptors1'] = data['descriptors0'].transpose(0, 1), data['descriptors1'].transpose(0, 1) - - for k in data: - if k == 'file_name' or k == 'skip_train': - continue - data[k].requres_grad = True - data[k] = data[k] - ##print(data.keys()) - ##print(data['keypoints0'].size()) - ##print(data['scores0'].size()) - ##print(data['descriptors0'].size()) - - # Perform the matching - #pred = {**pred, **self.superglue(data)} - - scores, data2 = self.simsuper(data) - - return scores, data2, data diff --git a/models/matchingsuperglue.py b/models/matchingsuperglue.py deleted file mode 100755 index 06ad7fd..0000000 --- a/models/matchingsuperglue.py +++ /dev/null @@ -1,105 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. 
-# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. -# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -import torch - -from .superpoint import SuperPoint -from .superglue2 import SuperGlue -from sjlee_backup.IMCsuperglue import SimpleSuperCATs - -class Matching_ori(torch.nn.Module): - """ Image Matching Frontend (SuperPoint + SuperGlue) """ - def __init__(self, config={}): - super().__init__() - self.superpoint = SuperPoint(config.get('superpoint', {})) - self.superglue = SuperGlue(config.get('superglue', {})) - self.simsuper = SimpleSuperCATs(config.get('superglue', {})) - - def forward(self, data): - """ Run SuperPoint (optionally) and SuperGlue - SuperPoint is skipped if ['keypoints0', 'keypoints1'] exist in input - Args: - data: dictionary with minimal keys: ['image0', 'image1'] - """ - pred = {} - - # Extract SuperPoint (keypoints, scores, descriptors) if not provided - with torch.no_grad(): - if 'keypoints0' not in data: - pred0 = self.superpoint({'image': data['image0']}) - pred = {**pred, **{k+'0': v for k, v in pred0.items()}} - if 'keypoints1' 
not in data: - pred1 = self.superpoint({'image': data['image1']}) - pred = {**pred, **{k+'1': v for k, v in pred1.items()}} - - # Batch all features - # We should either have i) one image per batch, or - # ii) the same number of local features for all images in the batch. - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - data[k].requres_grad = True - - self.superglue(data) - - data['keypoints0'], data['keypoints1'] = data['keypoints0'].unsqueeze(0), data['keypoints1'].unsqueeze(0) - data['scores0'], data['scores1'] = data['scores0'].transpose(0,1), data['scores1'].transpose(0,1) - data['descriptors0'], data['descriptors1'] = data['descriptors0'].transpose(0, 1), data['descriptors1'].transpose(0, 1) - - for k in data: - if k == 'file_name' or k == 'skip_train': - continue - data[k].requres_grad = True - - ##print(data.keys()) - ##print(data['keypoints0'].size()) - ##print(data['scores0'].size()) - ##print(data['descriptors0'].size()) - - # Perform the matching - #pred = {**pred, **self.superglue(data)} - - scores, data2 = self.simsuper(data) - - return scores, data2, data diff --git a/models/superglue2.py b/models/superglue2.py deleted file mode 100644 index 9605a75..0000000 --- a/models/superglue2.py +++ /dev/null @@ -1,290 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. 
Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -from typing import List, Tuple - -import torch -from torch import nn - - -def MLP(channels: List[int], do_bn: bool = True) -> nn.Module: - """ Multi-layer perceptron """ - n = len(channels) - layers = [] - for i in range(1, n): - layers.append( - nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n-1): - if do_bn: - layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """ Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one*width, one*height])[None] - center = size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """ Joint encoding of visual appearance and location using MLPs""" - def __init__(self, feature_dim: int, layers: List[int]) -> None: - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - dim = query.shape[1] - scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """ Multi-head attention to increase model expressivitiy """ - def 
__init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim = d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor: - batch_dim = query.size(0) - query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value))] - x, _ = attention(query, key, value) - return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x: torch.Tensor, source: torch.Tensor) -> torch.Tensor: - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, feature_dim: int, layer_names: List[str]) -> None: - super().__init__() - self.layers = nn.ModuleList([ - AttentionalPropagation(feature_dim, 4) - for _ in range(len(layer_names))]) - self.names = layer_names - - def forward(self, desc0: torch.Tensor, desc1: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - for layer, name in zip(self.layers, self.names): - if name == 'cross': - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations(Z: torch.Tensor, log_mu: torch.Tensor, log_nu: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Sinkhorn Normalization in Log-space for stability""" - u, v = 
torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat([torch.cat([scores, bins0], -1), - torch.cat([bins1, alpha], -1)], 1) - - norm = - (ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - - The correspondence ids use -1 to indicate non-matching points. - - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. 
https://arxiv.org/abs/1911.11763 - - """ - default_config = { - 'descriptor_dim': 256, - 'weights': 'indoor', - 'keypoint_encoder': [32, 64, 128, 256], - 'GNN_layers': ['self', 'cross'] * 9, - 'sinkhorn_iterations': 100, - 'match_threshold': 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config['descriptor_dim'], self.config['keypoint_encoder']) - - self.gnn = AttentionalGNN( - feature_dim=self.config['descriptor_dim'], layer_names=self.config['GNN_layers']) - - self.final_proj = nn.Conv1d( - self.config['descriptor_dim'], self.config['descriptor_dim'], - kernel_size=1, bias=True) - - bin_score = torch.nn.Parameter(torch.tensor(1.)) - self.register_parameter('bin_score', bin_score) - - assert self.config['weights'] in ['indoor', 'outdoor'] - path = Path(__file__).parent - path = path / 'weights/superglue_{}.pth'.format(self.config['weights']) - self.load_state_dict(torch.load(str(path))) - print('Loaded SuperGlue model (\"{}\" weights)'.format( - self.config['weights'])) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int), - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int), - 'matching_scores0': kpts0.new_zeros(shape0), - 'matching_scores1': kpts1.new_zeros(shape1), - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, data['scores0']) - desc1 = desc1 + self.kenc(kpts1, data['scores1']) - - # Multi-layer Transformer network. 
- desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - #print(scores) - #print(scores.max(), scores.min()) - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - - - # Get the matches with score above "match_threshold". - max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - return { - 'matches0': indices0, # use -1 for invalid match - 'matches1': indices1, # use -1 for invalid match - 'matching_scores0': mscores0, - 'matching_scores1': mscores1, - } diff --git a/models/superpoint.py b/models/superpoint.py deleted file mode 100755 index 8e41192..0000000 --- a/models/superpoint.py +++ /dev/null @@ -1,202 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. 
The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from pathlib import Path -import torch -from torch import nn - -def simple_nms(scores, nms_radius: int): - """ Fast Non-maximum suppression to remove nearby points """ - assert(nms_radius >= 0) - - def max_pool(x): - return torch.nn.functional.max_pool2d( - x, kernel_size=nms_radius*2+1, stride=1, padding=nms_radius) - - zeros = torch.zeros_like(scores) - max_mask = scores == max_pool(scores) - for _ in range(2): - supp_mask = max_pool(max_mask.float()) > 0 - supp_scores = torch.where(supp_mask, zeros, scores) - new_max_mask = supp_scores == max_pool(supp_scores) - max_mask = max_mask | (new_max_mask & (~supp_mask)) - return torch.where(max_mask, scores, zeros) - - -def remove_borders(keypoints, scores, border: int, height: int, width: int): - """ Removes keypoints too close to the border """ - mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border)) - mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border)) - mask = mask_h & mask_w - return keypoints[mask], scores[mask] - - -def top_k_keypoints(keypoints, scores, k: int): - if k >= len(keypoints): - return keypoints, scores - scores, indices = torch.topk(scores, k, dim=0) - return keypoints[indices], scores - - -def sample_descriptors(keypoints, descriptors, s: int = 8): - """ Interpolate descriptors at keypoint locations """ - b, c, h, w = descriptors.shape - keypoints = keypoints - s / 2 + 0.5 - keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 0.5)], - ).to(keypoints)[None] - keypoints = keypoints*2 - 1 # normalize to (-1, 1) - args = {'align_corners': True} if int(torch.__version__[2]) > 2 else {} - descriptors = torch.nn.functional.grid_sample( - descriptors, keypoints.view(b, 1, -1, 2), 
mode='bilinear', **args) - descriptors = torch.nn.functional.normalize( - descriptors.reshape(b, c, -1), p=2, dim=1) - return descriptors - - -class SuperPoint(nn.Module): - """SuperPoint Convolutional Detector and Descriptor - - SuperPoint: Self-Supervised Interest Point Detection and - Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629 - - """ - default_config = { - 'descriptor_dim': 256, - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1, - 'remove_borders': 4, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.relu = nn.ReLU(inplace=True) - self.pool = nn.MaxPool2d(kernel_size=2, stride=2) - c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256 - - self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1) - self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) - self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) - self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) - self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) - self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) - self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) - self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) - - self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) - self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) - - self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) - self.convDb = nn.Conv2d( - c5, self.config['descriptor_dim'], - kernel_size=1, stride=1, padding=0) - - path = Path(__file__).parent / 'weights/superpoint_v1.pth' - self.load_state_dict(torch.load(str(path))) - - mk = self.config['max_keypoints'] - if mk == 0 or mk < -1: - raise ValueError('\"max_keypoints\" must be positive or \"-1\"') - - print('Loaded SuperPoint model') - - def forward(self, data): - """ Compute 
keypoints, scores, descriptors for image """ - # Shared Encoder - x = self.relu(self.conv1a(data['image'])) - x = self.relu(self.conv1b(x)) - x = self.pool(x) - x = self.relu(self.conv2a(x)) - x = self.relu(self.conv2b(x)) - x = self.pool(x) - x = self.relu(self.conv3a(x)) - x = self.relu(self.conv3b(x)) - x = self.pool(x) - x = self.relu(self.conv4a(x)) - x = self.relu(self.conv4b(x)) - - # Compute the dense keypoint scores - cPa = self.relu(self.convPa(x)) - scores = self.convPb(cPa) - scores = torch.nn.functional.softmax(scores, 1)[:, :-1] - b, _, h, w = scores.shape - scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8) - scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8) - scores = simple_nms(scores, self.config['nms_radius']) - - # Extract keypoints - keypoints = [ - torch.nonzero(s > self.config['keypoint_threshold']) - for s in scores] - scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)] - - # Discard keypoints near the image borders - keypoints, scores = list(zip(*[ - remove_borders(k, s, self.config['remove_borders'], h*8, w*8) - for k, s in zip(keypoints, scores)])) - - # Keep the k keypoints with highest score - if self.config['max_keypoints'] >= 0: - keypoints, scores = list(zip(*[ - top_k_keypoints(k, s, self.config['max_keypoints']) - for k, s in zip(keypoints, scores)])) - - # Convert (h, w) to (x, y) - keypoints = [torch.flip(k, [1]).float() for k in keypoints] - - # Compute the dense descriptors - cDa = self.relu(self.convDa(x)) - descriptors = self.convDb(cDa) - descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1) - - # Extract descriptors - descriptors = [sample_descriptors(k[None], d[None], 8)[0] - for k, d in zip(keypoints, descriptors)] - - return { - 'keypoints': keypoints, - 'scores': scores, - 'descriptors': descriptors, - } diff --git a/models/utils.py b/models/utils.py deleted file mode 100755 index 6b4ec97..0000000 --- a/models/utils.py +++ /dev/null @@ -1,558 +0,0 @@ -# %BANNER_BEGIN% -# 
--------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# Daniel DeTone -# Tomasz Malisiewicz -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from pathlib import Path -import time -from collections import OrderedDict -from threading import Thread -import numpy as np -import cv2 -import torch -import matplotlib.pyplot as plt -import matplotlib -matplotlib.use('Agg') - - -class AverageTimer: - """ Class to help manage printing simple timing of code execution. """ - - def __init__(self, smoothing=0.3, newline=False): - self.smoothing = smoothing - self.newline = newline - self.times = OrderedDict() - self.will_print = OrderedDict() - self.reset() - - def reset(self): - now = time.time() - self.start = now - self.last_time = now - for name in self.will_print: - self.will_print[name] = False - - def update(self, name='default'): - now = time.time() - dt = now - self.last_time - if name in self.times: - dt = self.smoothing * dt + (1 - self.smoothing) * self.times[name] - self.times[name] = dt - self.will_print[name] = True - self.last_time = now - - def print(self, text='Timer'): - total = 0. - print('[{}]'.format(text), end=' ') - for key in self.times: - val = self.times[key] - if self.will_print[key]: - print('%s=%.3f' % (key, val), end=' ') - total += val - print('total=%.3f sec {%.1f FPS}' % (total, 1./total), end=' ') - if self.newline: - print(flush=True) - else: - print(end='\r', flush=True) - self.reset() - - -class VideoStreamer: - """ Class to help process image streams. Four types of possible inputs:" - 1.) USB Webcam. - 2.) An IP camera - 3.) A directory of images (files in directory matching 'image_glob'). - 4.) A video file, such as an .mp4 or .avi file. 
- """ - def __init__(self, basedir, resize, skip, image_glob, max_length=1000000): - self._ip_grabbed = False - self._ip_running = False - self._ip_camera = False - self._ip_image = None - self._ip_index = 0 - self.cap = [] - self.camera = True - self.video_file = False - self.listing = [] - self.resize = resize - self.interp = cv2.INTER_AREA - self.i = 0 - self.skip = skip - self.max_length = max_length - if isinstance(basedir, int) or basedir.isdigit(): - print('==> Processing USB webcam input: {}'.format(basedir)) - self.cap = cv2.VideoCapture(int(basedir)) - self.listing = range(0, self.max_length) - elif basedir.startswith(('http', 'rtsp')): - print('==> Processing IP camera input: {}'.format(basedir)) - self.cap = cv2.VideoCapture(basedir) - self.start_ip_camera_thread() - self._ip_camera = True - self.listing = range(0, self.max_length) - elif Path(basedir).is_dir(): - print('==> Processing image directory input: {}'.format(basedir)) - self.listing = list(Path(basedir).glob(image_glob[0])) - for j in range(1, len(image_glob)): - image_path = list(Path(basedir).glob(image_glob[j])) - self.listing = self.listing + image_path - self.listing.sort() - self.listing = self.listing[::self.skip] - self.max_length = np.min([self.max_length, len(self.listing)]) - if self.max_length == 0: - raise IOError('No images found (maybe bad \'image_glob\' ?)') - self.listing = self.listing[:self.max_length] - self.camera = False - elif Path(basedir).exists(): - print('==> Processing video input: {}'.format(basedir)) - self.cap = cv2.VideoCapture(basedir) - self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) - num_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) - self.listing = range(0, num_frames) - self.listing = self.listing[::self.skip] - self.video_file = True - self.max_length = np.min([self.max_length, len(self.listing)]) - self.listing = self.listing[:self.max_length] - else: - raise ValueError('VideoStreamer input \"{}\" not recognized.'.format(basedir)) - if self.camera and 
not self.cap.isOpened(): - raise IOError('Could not read camera') - - def load_image(self, impath): - """ Read image as grayscale and resize to img_size. - Inputs - impath: Path to input image. - Returns - grayim: uint8 numpy array sized H x W. - """ - grayim = cv2.imread(impath, 0) - if grayim is None: - raise Exception('Error reading image %s' % impath) - w, h = grayim.shape[1], grayim.shape[0] - w_new, h_new = process_resize(w, h, self.resize) - grayim = cv2.resize( - grayim, (w_new, h_new), interpolation=self.interp) - return grayim - - def next_frame(self): - """ Return the next frame, and increment internal counter. - Returns - image: Next H x W image. - status: True or False depending whether image was loaded. - """ - - if self.i == self.max_length: - return (None, False) - if self.camera: - - if self._ip_camera: - #Wait for first image, making sure we haven't exited - while self._ip_grabbed is False and self._ip_exited is False: - time.sleep(.001) - - ret, image = self._ip_grabbed, self._ip_image.copy() - if ret is False: - self._ip_running = False - else: - ret, image = self.cap.read() - if ret is False: - print('VideoStreamer: Cannot get image from camera') - return (None, False) - w, h = image.shape[1], image.shape[0] - if self.video_file: - self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.listing[self.i]) - - w_new, h_new = process_resize(w, h, self.resize) - image = cv2.resize(image, (w_new, h_new), - interpolation=self.interp) - image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) - else: - image_file = str(self.listing[self.i]) - image = self.load_image(image_file) - self.i = self.i + 1 - return (image, True) - - def start_ip_camera_thread(self): - self._ip_thread = Thread(target=self.update_ip_camera, args=()) - self._ip_running = True - self._ip_thread.start() - self._ip_exited = False - return self - - def update_ip_camera(self): - while self._ip_running: - ret, img = self.cap.read() - if ret is False: - self._ip_running = False - self._ip_exited = True - 
self._ip_grabbed = False - return - - self._ip_image = img - self._ip_grabbed = ret - self._ip_index += 1 - #print('IPCAMERA THREAD got frame {}'.format(self._ip_index)) - - - def cleanup(self): - self._ip_running = False - -# --- PREPROCESSING --- - -def process_resize(w, h, resize): - assert(len(resize) > 0 and len(resize) <= 2) - if len(resize) == 1 and resize[0] > -1: - scale = resize[0] / max(h, w) - w_new, h_new = int(round(w*scale)), int(round(h*scale)) - elif len(resize) == 1 and resize[0] == -1: - w_new, h_new = w, h - else: # len(resize) == 2: - w_new, h_new = resize[0], resize[1] - - # Issue warning if resolution is too small or too large. - if max(w_new, h_new) < 160: - print('Warning: input resolution is very small, results may vary') - elif max(w_new, h_new) > 2000: - print('Warning: input resolution is very large, results may vary') - - return w_new, h_new - - -def frame2tensor(frame): - return torch.from_numpy(frame/255.).float()[None, None].cuda() - - -def read_image(path, resize, rotation, resize_float): - image = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE) - if image is None: - return None, None, None - w, h = image.shape[1], image.shape[0] - w_new, h_new = process_resize(w, h, resize) - scales = (float(w) / float(w_new), float(h) / float(h_new)) - - if resize_float: - image = cv2.resize(image.astype('float32'), (w_new, h_new)) - else: - image = cv2.resize(image, (w_new, h_new)).astype('float32') - - if rotation != 0: - image = np.rot90(image, k=rotation) - if rotation % 2: - scales = scales[::-1] - - inp = frame2tensor(image) - return image, inp, scales - - - -def read_image_modified(image, resize, resize_float): - if image is None: - return None, None, None - w, h = image.shape[1], image.shape[0] - w_new, h_new = process_resize(w, h, resize) - scales = (float(w) / float(w_new), float(h) / float(h_new)) - if resize_float: - image = cv2.resize(image.astype('float32'), (w_new, h_new)) - else: - image = cv2.resize(image, (w_new, 
h_new)).astype('float32') - return image -# --- GEOMETRY --- - - -def estimate_pose(kpts0, kpts1, K0, K1, thresh, conf=0.99999): - if len(kpts0) < 5: - return None - - f_mean = np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]]) - norm_thresh = thresh / f_mean - - kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None] - kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None] - - E, mask = cv2.findEssentialMat( - kpts0, kpts1, np.eye(3), threshold=norm_thresh, prob=conf, - method=cv2.RANSAC) - - assert E is not None - - best_num_inliers = 0 - ret = None - for _E in np.split(E, len(E) / 3): - n, R, t, mask_new = cv2.recoverPose( - _E, kpts0, kpts1, np.eye(3), 1e9, mask=mask) - if n > best_num_inliers: - best_num_inliers = n - ret = (R, t[:, 0], mask.ravel() > 0) - return ret - - -def rotate_intrinsics(K, image_shape, rot): - """image_shape is the shape of the image after rotation""" - assert rot <= 3 - h, w = image_shape[:2][::-1 if (rot % 2) else 1] - fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] - rot = rot % 4 - if rot == 1: - return np.array([[fy, 0., cy], - [0., fx, w-1-cx], - [0., 0., 1.]], dtype=K.dtype) - elif rot == 2: - return np.array([[fx, 0., w-1-cx], - [0., fy, h-1-cy], - [0., 0., 1.]], dtype=K.dtype) - else: # if rot == 3: - return np.array([[fy, 0., h-1-cy], - [0., fx, cx], - [0., 0., 1.]], dtype=K.dtype) - - -def rotate_pose_inplane(i_T_w, rot): - rotation_matrices = [ - np.array([[np.cos(r), -np.sin(r), 0., 0.], - [np.sin(r), np.cos(r), 0., 0.], - [0., 0., 1., 0.], - [0., 0., 0., 1.]], dtype=np.float32) - for r in [np.deg2rad(d) for d in (0, 270, 180, 90)] - ] - return np.dot(rotation_matrices[rot], i_T_w) - - -def scale_intrinsics(K, scales): - scales = np.diag([1./scales[0], 1./scales[1], 1.]) - return np.dot(scales, K) - - -def to_homogeneous(points): - return np.concatenate([points, np.ones_like(points[:, :1])], axis=-1) - - -def compute_epipolar_error(kpts0, kpts1, T_0to1, K0, K1): - kpts0 = (kpts0 - K0[[0, 1], 
[2, 2]][None]) / K0[[0, 1], [0, 1]][None] - kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None] - kpts0 = to_homogeneous(kpts0) - kpts1 = to_homogeneous(kpts1) - - t0, t1, t2 = T_0to1[:3, 3] - t_skew = np.array([ - [0, -t2, t1], - [t2, 0, -t0], - [-t1, t0, 0] - ]) - E = t_skew @ T_0to1[:3, :3] - - Ep0 = kpts0 @ E.T # N x 3 - p1Ep0 = np.sum(kpts1 * Ep0, -1) # N - Etp1 = kpts1 @ E # N x 3 - d = p1Ep0**2 * (1.0 / (Ep0[:, 0]**2 + Ep0[:, 1]**2) - + 1.0 / (Etp1[:, 0]**2 + Etp1[:, 1]**2)) - return d - - -def angle_error_mat(R1, R2): - cos = (np.trace(np.dot(R1.T, R2)) - 1) / 2 - cos = np.clip(cos, -1., 1.) # numercial errors can make it out of bounds - return np.rad2deg(np.abs(np.arccos(cos))) - - -def angle_error_vec(v1, v2): - n = np.linalg.norm(v1) * np.linalg.norm(v2) - return np.rad2deg(np.arccos(np.clip(np.dot(v1, v2) / n, -1.0, 1.0))) - - -def compute_pose_error(T_0to1, R, t): - R_gt = T_0to1[:3, :3] - t_gt = T_0to1[:3, 3] - error_t = angle_error_vec(t, t_gt) - error_t = np.minimum(error_t, 180 - error_t) # ambiguity of E estimation - error_R = angle_error_mat(R, R_gt) - return error_t, error_R - - -def pose_auc(errors, thresholds): - sort_idx = np.argsort(errors) - errors = np.array(errors.copy())[sort_idx] - recall = (np.arange(len(errors)) + 1) / len(errors) - errors = np.r_[0., errors] - recall = np.r_[0., recall] - aucs = [] - for t in thresholds: - last_index = np.searchsorted(errors, t) - r = np.r_[recall[:last_index], recall[last_index-1]] - e = np.r_[errors[:last_index], t] - aucs.append(np.trapz(r, x=e)/t) - return aucs - - -# --- VISUALIZATION --- - - -def plot_image_pair(imgs, dpi=100, size=6, pad=.5): - n = len(imgs) - assert n == 2, 'number of images must be two' - figsize = (size*n, size*3/4) if size is not None else None - _, ax = plt.subplots(1, n, figsize=figsize, dpi=dpi) - for i in range(n): - ax[i].imshow(imgs[i], cmap=plt.get_cmap('gray'), vmin=0, vmax=255) - ax[i].get_yaxis().set_ticks([]) - ax[i].get_xaxis().set_ticks([]) - 
for spine in ax[i].spines.values(): # remove frame - spine.set_visible(False) - plt.tight_layout(pad=pad) - - -def plot_keypoints(kpts0, kpts1, color='w', ps=2): - ax = plt.gcf().axes - ax[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps) - ax[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps) - - -def plot_matches(kpts0, kpts1, color, lw=1.5, ps=4): - fig = plt.gcf() - ax = fig.axes - fig.canvas.draw() - - transFigure = fig.transFigure.inverted() - fkpts0 = transFigure.transform(ax[0].transData.transform(kpts0)) - fkpts1 = transFigure.transform(ax[1].transData.transform(kpts1)) - - fig.lines = [matplotlib.lines.Line2D( - (fkpts0[i, 0], fkpts1[i, 0]), (fkpts0[i, 1], fkpts1[i, 1]), zorder=1, - transform=fig.transFigure, c=color[i], linewidth=lw) - for i in range(len(kpts0))] - ax[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps) - ax[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps) - - -def make_matching_plot(image0, image1, kpts0, kpts1, mkpts0, mkpts1, - color, text, path, name0, name1, show_keypoints=False, - fast_viz=False, opencv_display=False, opencv_title='matches'): - - if fast_viz: - make_matching_plot_fast(image0, image1, kpts0, kpts1, mkpts0, mkpts1, - color, text, path, show_keypoints, 10, - opencv_display, opencv_title) - return - - plot_image_pair([image0, image1]) - if show_keypoints: - plot_keypoints(kpts0, kpts1, color='k', ps=4) - plot_keypoints(kpts0, kpts1, color='w', ps=2) - plot_matches(mkpts0, mkpts1, color) - - fig = plt.gcf() - txt_color = 'k' if image0[:100, :150].mean() > 200 else 'w' - fig.text( - 0.01, 0.99, '\n'.join(text), transform=fig.axes[0].transAxes, - fontsize=15, va='top', ha='left', color=txt_color) - - txt_color = 'k' if image0[-100:, :150].mean() > 200 else 'w' - fig.text( - 0.01, 0.01, name0, transform=fig.axes[0].transAxes, - fontsize=5, va='bottom', ha='left', color=txt_color) - - txt_color = 'k' if image1[-100:, :150].mean() > 200 else 'w' - fig.text( - 0.01, 0.01, name1, transform=fig.axes[1].transAxes, - 
fontsize=5, va='bottom', ha='left', color=txt_color) - - plt.savefig(str(path), bbox_inches='tight', pad_inches=0) - plt.close() - - -def make_matching_plot_fast(image0, image1, kpts0, kpts1, mkpts0, - mkpts1, color, text, path=None, - show_keypoints=False, margin=10, - opencv_display=False, opencv_title=''): - H0, W0 = image0.shape - H1, W1 = image1.shape - H, W = max(H0, H1), W0 + W1 + margin - - out = 255*np.ones((H, W), np.uint8) - out[:H0, :W0] = image0 - out[:H1, W0+margin:] = image1 - out = np.stack([out]*3, -1) - - if show_keypoints: - kpts0, kpts1 = np.round(kpts0).astype(int), np.round(kpts1).astype(int) - white = (255, 255, 255) - black = (0, 0, 0) - for x, y in kpts0: - cv2.circle(out, (x, y), 2, black, -1, lineType=cv2.LINE_AA) - cv2.circle(out, (x, y), 1, white, -1, lineType=cv2.LINE_AA) - for x, y in kpts1: - cv2.circle(out, (x + margin + W0, y), 2, black, -1, - lineType=cv2.LINE_AA) - cv2.circle(out, (x + margin + W0, y), 1, white, -1, - lineType=cv2.LINE_AA) - - mkpts0, mkpts1 = np.round(mkpts0).astype(int), np.round(mkpts1).astype(int) - color = (np.array(color[:, :3])*255).astype(int)[:, ::-1] - for (x0, y0), (x1, y1), c in zip(mkpts0, mkpts1, color): - c = c.tolist() - cv2.line(out, (x0, y0), (x1 + margin + W0, y1), - color=c, thickness=1, lineType=cv2.LINE_AA) - # display line end-points as circles - cv2.circle(out, (x0, y0), 2, c, -1, lineType=cv2.LINE_AA) - cv2.circle(out, (x1 + margin + W0, y1), 2, c, -1, - lineType=cv2.LINE_AA) - - Ht = int(H * 30 / 480) # text height - txt_color_fg = (255, 255, 255) - txt_color_bg = (0, 0, 0) - for i, t in enumerate(text): - cv2.putText(out, t, (10, Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX, - H*1.0/480, txt_color_bg, 2, cv2.LINE_AA) - cv2.putText(out, t, (10, Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX, - H*1.0/480, txt_color_fg, 1, cv2.LINE_AA) - - if path is not None: - cv2.imwrite(str(path), out) - - if opencv_display: - cv2.imshow(opencv_title, out) - cv2.waitKey(1) - - return out - - -def error_colormap(x): - return 
np.clip( - np.stack([2-x*2, x*2, np.zeros_like(x), np.ones_like(x)], -1), 0, 1) diff --git a/sjlee/IMC.py b/sjlee/IMC.py deleted file mode 100644 index d414952..0000000 --- a/sjlee/IMC.py +++ /dev/null @@ -1,208 +0,0 @@ - -import os -import sys - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import numpy as np -from functools import partial - -from pydoc import source_synopsis -from sjlee.superglue2 import SuperGlue, normalize_keypoints, arange_like, log_sinkhorn_iterations, log_optimal_transport - -sys.path.append(os.path.join(os.path.dirname(__file__), 'cats')) -from cats import TransformerAggregator - -def dfs_freeze(model): - for name, child in model.named_children(): - for param in child.parameters(): - param.requires_grad = False - - dfs_freeze(child) - -def softmax_with_temperature(x, beta=2, d = 1): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - M, _ = x.max(dim=d, keepdim=True) - x = x - M # subtract maximum value for stability - exp_x = torch.exp(x/beta) - exp_x_sum = exp_x.sum(dim=d, keepdim=True) - return exp_x / exp_x_sum - -def single_optimal(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - norm = - (ms + ns).log() - log_mu = norm.expand(m) - log_nu = norm.expand(n) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(scores, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - -# positional embedding 필요한가? 
-# M * N 크기가 다 다른 문제 -class SimpleSuperCATs(SuperGlue): - def __init__(self, - config, - feature_size=32, - feature_proj_dim=128, - depth=4, - num_heads=4, - mlp_ratio=4, - ): - super().__init__(config) - - # freeze superglue's layers - dfs_freeze(self.kenc) - dfs_freeze(self.gnn) - dfs_freeze(self.final_proj) - - self.feature_size = feature_size - self.feature_proj_dim = feature_proj_dim - self.decoder_embed_dim = self.feature_size ** 2 - - self.decoder = TransformerAggregator( - img_size=self.feature_size, embed_dim=self.decoder_embed_dim, depth=depth, num_heads=num_heads, - mlp_ratio=mlp_ratio, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), - num_hyperpixel=1 - ) - - self.num_heads = num_heads - self.mask = None - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - desc0 = desc0.transpose(0,1) - desc1 = desc1.transpose(0,1) - kpts0 = torch.reshape(kpts0, (1, -1, 2)) - kpts1 = torch.reshape(kpts1, (1, -1, 2)) - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return [],{ - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int)[0], - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int)[0], - 'matching_scores0': kpts0.new_zeros(shape0)[0], - 'matching_scores1': kpts1.new_zeros(shape1)[0], - 'skip_train': True - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, torch.transpose(data['scores0'], 0, 1)) - desc1 = desc1 + self.kenc(kpts1, torch.transpose(data['scores1'], 0, 1)) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. 
- mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - b, m, n = scores.shape - max_keypoints = self.feature_size ** 2 - if m + n < max_keypoints *2: - p2d = (0, max_keypoints-n, 0, max_keypoints-m) - pad = scores.min().item() - scores = F.pad(scores, p2d, 'constant', pad).type(scores.dtype) - self.mask = (scores == pad).expand(1, self.num_heads, max_keypoints, max_keypoints) - - scores = self.decoder(scores[:, None, :, :], self.mask) - scores = scores[:, :m, :n] - scores = log_optimal_transport( - scores, self.bin_score, - iters=1) - - # Get the matches with score above "match_threshold". - max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1 , 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - return scores, { - 'matches0': indices0[0], # use -1 for invalid match - 'matches1': indices1[0], # use -1 for invalid match - 'matching_scores0': mscores0[0], - 'matching_scores1': mscores1[0], - 'skip_train': False - } - - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': 1024 - }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2 - } - } - - """ - data = { - 'image0': torch.randn(1, 
1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - """ - - pred = { - 'keypoints0' : torch.randn(1, 1, 484, 2), - 'keypoints1' : torch.randn(1, 1, 484, 2), - 'descriptors0' : torch.randn(256, 1, 484), - 'descriptors1' : torch.randn(256, 1, 484), - 'scores0' : torch.randn(484, 1), - 'scores1' : torch.randn(484, 1), - 'image0' : torch.randn(1, 1, 512, 512), - 'image1' : torch.randn(1, 1, 512, 512), - # 'all_matches' : torch.randn(2, 1, 1248) - } - - superglue = SimpleSuperCATs(config.get('superglue', {})) - scores, output = superglue(pred) - - # loss = loss_superglue(scores, pred['all_matches'].permute(1, 2, 0)) - # print(loss) \ No newline at end of file diff --git a/sjlee/__pycache__/IMC.cpython-38.pyc b/sjlee/__pycache__/IMC.cpython-38.pyc deleted file mode 100644 index 0c0fbaf..0000000 Binary files a/sjlee/__pycache__/IMC.cpython-38.pyc and /dev/null differ diff --git a/sjlee/__pycache__/IMC.cpython-39.pyc b/sjlee/__pycache__/IMC.cpython-39.pyc deleted file mode 100644 index 98af12e..0000000 Binary files a/sjlee/__pycache__/IMC.cpython-39.pyc and /dev/null differ diff --git a/sjlee/__pycache__/loss.cpython-38.pyc b/sjlee/__pycache__/loss.cpython-38.pyc deleted file mode 100644 index 9a08dc8..0000000 Binary files a/sjlee/__pycache__/loss.cpython-38.pyc and /dev/null differ diff --git a/sjlee/__pycache__/loss.cpython-39.pyc b/sjlee/__pycache__/loss.cpython-39.pyc deleted file mode 100644 index 2a65989..0000000 Binary files a/sjlee/__pycache__/loss.cpython-39.pyc and /dev/null differ diff --git a/sjlee/__pycache__/superglue.cpython-38.pyc 
b/sjlee/__pycache__/superglue.cpython-38.pyc deleted file mode 100644 index 3acd4d8..0000000 Binary files a/sjlee/__pycache__/superglue.cpython-38.pyc and /dev/null differ diff --git a/sjlee/__pycache__/superglue2.cpython-38.pyc b/sjlee/__pycache__/superglue2.cpython-38.pyc deleted file mode 100644 index 79679e0..0000000 Binary files a/sjlee/__pycache__/superglue2.cpython-38.pyc and /dev/null differ diff --git a/sjlee/__pycache__/superglue2.cpython-39.pyc b/sjlee/__pycache__/superglue2.cpython-39.pyc deleted file mode 100644 index dc8a91a..0000000 Binary files a/sjlee/__pycache__/superglue2.cpython-39.pyc and /dev/null differ diff --git a/sjlee/__pycache__/superpoint.cpython-38.pyc b/sjlee/__pycache__/superpoint.cpython-38.pyc deleted file mode 100644 index 262ba3e..0000000 Binary files a/sjlee/__pycache__/superpoint.cpython-38.pyc and /dev/null differ diff --git a/sjlee/cats/__pycache__/cats.cpython-38.pyc b/sjlee/cats/__pycache__/cats.cpython-38.pyc deleted file mode 100644 index 3754171..0000000 Binary files a/sjlee/cats/__pycache__/cats.cpython-38.pyc and /dev/null differ diff --git a/sjlee/cats/__pycache__/cats.cpython-39.pyc b/sjlee/cats/__pycache__/cats.cpython-39.pyc deleted file mode 100644 index aeb5fba..0000000 Binary files a/sjlee/cats/__pycache__/cats.cpython-39.pyc and /dev/null differ diff --git a/sjlee/cats/__pycache__/mod.cpython-38.pyc b/sjlee/cats/__pycache__/mod.cpython-38.pyc deleted file mode 100644 index 99fe958..0000000 Binary files a/sjlee/cats/__pycache__/mod.cpython-38.pyc and /dev/null differ diff --git a/sjlee/cats/__pycache__/mod.cpython-39.pyc b/sjlee/cats/__pycache__/mod.cpython-39.pyc deleted file mode 100644 index eb7be3e..0000000 Binary files a/sjlee/cats/__pycache__/mod.cpython-39.pyc and /dev/null differ diff --git a/sjlee/cats/cats.py b/sjlee/cats/cats.py deleted file mode 100644 index ec9e200..0000000 --- a/sjlee/cats/cats.py +++ /dev/null @@ -1,408 +0,0 @@ -import os -import sys -from operator import add -from functools 
import reduce, partial - -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np - -import torchvision.models as models - -from feature_backbones import resnet -from mod import FeatureL2Norm, unnormalise_and_convert_mapping_to_flow - -''' -Modified timm library Vision Transformer implementation -https://github.com/rwightman/pytorch-image-models -''' - -# ================= timm functions START ================= # - -import math -import warnings - -def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, - the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for - changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use - 'survival rate' as the argument. - """ - if drop_prob == 0. or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = x.new_empty(shape).bernoulli_(keep_prob) - if keep_prob > 0.0 and scale_by_keep: - random_tensor.div_(keep_prob) - return x * random_tensor - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - # Cut & paste from PyTorch official master until it's in a few official releases - RW - # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
" - "The distribution of values may be incorrect.", - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - l = norm_cdf((a - mean) / std) - u = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. - tensor.uniform_(2 * l - 1, 2 * u - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - # type: (Tensor, float, float, float, float) -> Tensor - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. 
- Args: - tensor: an n-dimensional `torch.Tensor` - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - a: the minimum cutoff value - b: the maximum cutoff value - Examples: - >>> w = torch.empty(3, 5) - >>> nn.init.trunc_normal_(w) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) - -# ================= timm functions END================= # - - - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x, mask=None): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - if mask is not None: - attn[mask] = -1e-9 - - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class 
MultiscaleBlock(nn.Module): - - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - self.attn_multiscale = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm1 = norm_layer(dim) - self.norm2 = norm_layer(dim) - self.norm3 = norm_layer(dim) - self.norm4 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - self.mlp2 = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, inputs): - ''' - Multi-level aggregation - ''' - x, mask = inputs - B, N, H, W = x.shape - if N == 1: - x = x.flatten(0, 1) - x = self.norm1(x) - x = x + self.drop_path(self.attn(self.norm1(x), mask=mask)) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x.view(B, N, H, W), mask - x = x.flatten(0, 1) - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp2(self.norm4(x))) - x = x.view(B, N, H, W).transpose(1, 2).flatten(0, 1) - x = x + self.drop_path(self.attn_multiscale(self.norm3(x))) - x = x.view(B, H, N, W).transpose(1, 2).flatten(0, 1) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.view(B, N, H, W) - return x - - -class TransformerAggregator(nn.Module): - def __init__(self, num_hyperpixel, img_size=224, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None): - super().__init__() - self.img_size = 
img_size - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.pos_embed_x = nn.Parameter(torch.zeros(1, num_hyperpixel, 1, img_size, embed_dim // 2)) - self.pos_embed_y = nn.Parameter(torch.zeros(1, num_hyperpixel, img_size, 1, embed_dim // 2)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.Sequential(*[ - MultiscaleBlock( - dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(depth)]) - - self.proj = nn.Linear(embed_dim, img_size ** 2) - self.norm = norm_layer(embed_dim) - - trunc_normal_(self.pos_embed_x, std=.02) - trunc_normal_(self.pos_embed_y, std=.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def forward(self, corr, mask=None): - B = corr.shape[0] - x = corr.clone() - - pos_embed = torch.cat((self.pos_embed_x.repeat(1, 1, self.img_size, 1, 1), self.pos_embed_y.repeat(1, 1, 1, self.img_size, 1)), dim=4) - pos_embed = pos_embed.flatten(2, 3) - - x = x.transpose(-1, -2) + pos_embed - x = self.proj(self.blocks((x, mask.transpose(-1, -2)))[0]).transpose(-1, -2) + corr # swapping the axis for swapping self-attention. 
- - x = x + pos_embed - x = self.proj(self.blocks((x, mask))[0]) + corr - - return x.mean(1) - - -class FeatureExtractionHyperPixel(nn.Module): - def __init__(self, hyperpixel_ids, feature_size, freeze=True): - super().__init__() - self.backbone = resnet.resnet101(pretrained=True) - self.feature_size = feature_size - if freeze: - for param in self.backbone.parameters(): - param.requires_grad = False - nbottlenecks = [3, 4, 23, 3] - self.bottleneck_ids = reduce(add, list(map(lambda x: list(range(x)), nbottlenecks))) - self.layer_ids = reduce(add, [[i + 1] * x for i, x in enumerate(nbottlenecks)]) - self.hyperpixel_ids = hyperpixel_ids - - - def forward(self, img): - r"""Extract desired a list of intermediate features""" - - feats = [] - - # Layer 0 - feat = self.backbone.conv1.forward(img) - feat = self.backbone.bn1.forward(feat) - feat = self.backbone.relu.forward(feat) - feat = self.backbone.maxpool.forward(feat) - if 0 in self.hyperpixel_ids: - feats.append(feat.clone()) - - # Layer 1-4 - for hid, (bid, lid) in enumerate(zip(self.bottleneck_ids, self.layer_ids)): - res = feat - feat = self.backbone.__getattr__('layer%d' % lid)[bid].conv1.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].bn1.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].conv2.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].bn2.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].conv3.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].bn3.forward(feat) - - if bid == 0: - res = self.backbone.__getattr__('layer%d' % lid)[bid].downsample.forward(res) - - feat += res - - if hid + 1 in self.hyperpixel_ids: - feats.append(feat.clone()) - #if hid + 1 == max(self.hyperpixel_ids): - # break - feat = self.backbone.__getattr__('layer%d' % 
lid)[bid].relu.forward(feat) - - # Up-sample & concatenate features to construct a hyperimage - - """ - for idx, feat in enumerate(feats): - feats[idx] = F.interpolate(feat, self.feature_size, None, 'bilinear', True) - """ - - return feats - - -class CATs(nn.Module): - def __init__(self, - feature_size=16, - feature_proj_dim=128, - depth=4, - num_heads=6, - mlp_ratio=4, - hyperpixel_ids=[0,8,20,21,26,28,29,30], - freeze=True): - super().__init__() - self.feature_size = feature_size - self.feature_proj_dim = feature_proj_dim - self.decoder_embed_dim = self.feature_size ** 2 + self.feature_proj_dim - - channels = [64] + [256] * 3 + [512] * 4 + [1024] * 23 + [2048] * 3 - - self.feature_extraction = FeatureExtractionHyperPixel(hyperpixel_ids, feature_size, freeze) - self.proj = nn.ModuleList([ - nn.Linear(channels[i], self.feature_proj_dim) for i in hyperpixel_ids - ]) - - self.decoder = TransformerAggregator( - img_size=self.feature_size, embed_dim=self.decoder_embed_dim, depth=depth, num_heads=num_heads, - mlp_ratio=mlp_ratio, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), - num_hyperpixel=len(hyperpixel_ids)) - - self.l2norm = FeatureL2Norm() - - self.x_normal = np.linspace(-1,1,self.feature_size) - self.x_normal = nn.Parameter(torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False)) - self.y_normal = np.linspace(-1,1,self.feature_size) - self.y_normal = nn.Parameter(torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False)) - - def softmax_with_temperature(self, x, beta, d = 1): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - M, _ = x.max(dim=d, keepdim=True) - x = x - M # subtract maximum value for stability - exp_x = torch.exp(x/beta) - exp_x_sum = exp_x.sum(dim=d, keepdim=True) - return exp_x / exp_x_sum - - def soft_argmax(self, corr, beta=0.02): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - b,_,h,w = corr.size() - - corr = self.softmax_with_temperature(corr, beta=beta, d=1) - corr = 
corr.view(-1,h,w,h,w) # (target hxw) x (source hxw) - - grid_x = corr.sum(dim=1, keepdim=False) # marginalize to x-coord. - x_normal = self.x_normal.expand(b,w) - x_normal = x_normal.view(b,w,1,1) - grid_x = (grid_x*x_normal).sum(dim=1, keepdim=True) # b x 1 x h x w - - grid_y = corr.sum(dim=2, keepdim=False) # marginalize to y-coord. - y_normal = self.y_normal.expand(b,h) - y_normal = y_normal.view(b,h,1,1) - grid_y = (grid_y*y_normal).sum(dim=1, keepdim=True) # b x 1 x h x w - return grid_x, grid_y - - def mutual_nn_filter(self, correlation_matrix): - r"""Mutual nearest neighbor filtering (Rocco et al. NeurIPS'18)""" - corr_src_max = torch.max(correlation_matrix, dim=3, keepdim=True)[0] - corr_trg_max = torch.max(correlation_matrix, dim=2, keepdim=True)[0] - corr_src_max[corr_src_max == 0] += 1e-30 - corr_trg_max[corr_trg_max == 0] += 1e-30 - - corr_src = correlation_matrix / corr_src_max - corr_trg = correlation_matrix / corr_trg_max - - return correlation_matrix * (corr_src * corr_trg) - - def corr(self, src, trg): - return src.flatten(2).transpose(-1, -2) @ trg.flatten(2) - - def forward(self, target, source): - B, _, H, W = target.size() - - src_feats = self.feature_extraction(source) - tgt_feats = self.feature_extraction(target) - - corrs = [] - src_feats_proj = [] - tgt_feats_proj = [] - for i, (src, tgt) in enumerate(zip(src_feats, tgt_feats)): - corr = self.corr(self.l2norm(src), self.l2norm(tgt)) - corrs.append(corr) - src_feats_proj.append(self.proj[i](src.flatten(2).transpose(-1, -2))) - tgt_feats_proj.append(self.proj[i](tgt.flatten(2).transpose(-1, -2))) - - src_feats = torch.stack(src_feats_proj, dim=1) - tgt_feats = torch.stack(tgt_feats_proj, dim=1) - corr = torch.stack(corrs, dim=1) - - corr = self.mutual_nn_filter(corr) - - refined_corr = self.decoder(corr, src_feats, tgt_feats) - - grid_x, grid_y = self.soft_argmax(refined_corr.view(B, -1, self.feature_size, self.feature_size)) - - flow = torch.cat((grid_x, grid_y), dim=1) - flow = 
unnormalise_and_convert_mapping_to_flow(flow) - - return flow diff --git a/sjlee/cats/feature_backbones/__pycache__/resnet.cpython-38.pyc b/sjlee/cats/feature_backbones/__pycache__/resnet.cpython-38.pyc deleted file mode 100644 index b023d5f..0000000 Binary files a/sjlee/cats/feature_backbones/__pycache__/resnet.cpython-38.pyc and /dev/null differ diff --git a/sjlee/cats/feature_backbones/__pycache__/resnet.cpython-39.pyc b/sjlee/cats/feature_backbones/__pycache__/resnet.cpython-39.pyc deleted file mode 100644 index 26d7638..0000000 Binary files a/sjlee/cats/feature_backbones/__pycache__/resnet.cpython-39.pyc and /dev/null differ diff --git a/sjlee/cats/feature_backbones/resnet.py b/sjlee/cats/feature_backbones/resnet.py deleted file mode 100644 index 2c94e68..0000000 --- a/sjlee/cats/feature_backbones/resnet.py +++ /dev/null @@ -1,342 +0,0 @@ -import torch -import torch.nn as nn -#from .utils import load_state_dict_from_url -try: - from torch.hub import load_state_dict_from_url -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url - - -__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', - 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', - 'wide_resnet50_2', 'wide_resnet101_2'] - - -model_urls = { - 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', - 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', - 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', - 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', - 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', - 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', - 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', - 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', - 'wide_resnet101_2': 
'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', -} - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=dilation, groups=groups, bias=False, dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - __constants__ = ['downsample'] - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None): - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - __constants__ = ['downsample'] - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None): - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.)) * groups - # Both self.conv2 and self.downsample layers 
downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, - norm_layer=None): - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = 
self._make_layer(block, 256, layers[2], stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2, - dilate=replace_stride_with_dilation[2]) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. - # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=self.groups, - base_width=self.base_width, dilation=self.dilation, - norm_layer=norm_layer)) - - return nn.Sequential(*layers) - - def _forward(self, x): - x = self.conv1(x) - print(x.shape) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = 
self.layer4(x) - - x = self.avgpool(x) - x = torch.flatten(x, 1) - x = self.fc(x) - - return x - - # Allow for accessing forward method in a inherited class - forward = _forward - - -def _resnet(arch, block, layers, pretrained, progress, **kwargs): - model = ResNet(block, layers, **kwargs) - if pretrained: - state_dict = load_state_dict_from_url(model_urls[arch], - progress=progress) - model.load_state_dict(state_dict) - return model - - -def resnet18(pretrained=False, progress=True, **kwargs): - r"""ResNet-18 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, - **kwargs) - - -def resnet34(pretrained=False, progress=True, **kwargs): - r"""ResNet-34 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, - **kwargs) - - -def resnet50(pretrained=False, progress=True, **kwargs): - r"""ResNet-50 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, - **kwargs) - - -def resnet101(pretrained=False, progress=True, **kwargs): - r"""ResNet-101 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, 
progress, - **kwargs) - - -def resnet152(pretrained=False, progress=True, **kwargs): - r"""ResNet-152 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, - **kwargs) - - -def resnext50_32x4d(pretrained=False, progress=True, **kwargs): - r"""ResNeXt-50 32x4d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 4 - return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], - pretrained, progress, **kwargs) - - -def resnext101_32x8d(pretrained=False, progress=True, **kwargs): - r"""ResNeXt-101 32x8d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 8 - return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], - pretrained, progress, **kwargs) - - -def wide_resnet50_2(pretrained=False, progress=True, **kwargs): - r"""Wide ResNet-50-2 model from - `"Wide Residual Networks" `_ - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
- Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], - pretrained, progress, **kwargs) - - -def wide_resnet101_2(pretrained=False, progress=True, **kwargs): - r"""Wide ResNet-101-2 model from - `"Wide Residual Networks" `_ - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], - pretrained, progress, **kwargs) \ No newline at end of file diff --git a/sjlee/cats/mod.py b/sjlee/cats/mod.py deleted file mode 100644 index 7ce21fa..0000000 --- a/sjlee/cats/mod.py +++ /dev/null @@ -1,213 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -from torch.autograd import Variable - -r''' -Copy-pasted from GLU-Net -https://github.com/PruneTruong/GLU-Net -''' - - -def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, batch_norm=False): - if batch_norm: - return nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, bias=True), - nn.BatchNorm2d(out_planes), - nn.LeakyReLU(0.1, inplace=True)) - else: - return nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, bias=True), - nn.LeakyReLU(0.1)) - - -def predict_flow(in_planes): - return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True) - - 
-def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1): - return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True) - - -def unnormalise_and_convert_mapping_to_flow(map): - # here map is normalised to -1;1 - # we put it back to 0,W-1, then convert it to flow - B, C, H, W = map.size() - mapping = torch.zeros_like(map) - # mesh grid - mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise - mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise - - xx = torch.arange(0, W).view(1,-1).repeat(H,1) - yy = torch.arange(0, H).view(-1,1).repeat(1,W) - xx = xx.view(1,1,H,W).repeat(B,1,1,1) - yy = yy.view(1,1,H,W).repeat(B,1,1,1) - grid = torch.cat((xx,yy),1).float() - - if mapping.is_cuda: - grid = grid.cuda() - flow = mapping - grid - return flow - - -class CorrelationVolume(nn.Module): - """ - Implementation by Ignacio Rocco - paper: https://arxiv.org/abs/1703.05593 - project: https://github.com/ignacio-rocco/cnngeometric_pytorch - """ - - def __init__(self): - super(CorrelationVolume, self).__init__() - - def forward(self, feature_A, feature_B): - b, c, h, w = feature_A.size() - - # reshape features for matrix multiplication - feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) # shape (b,c,h*w) - feature_B = feature_B.view(b, c, h * w).transpose(1, 2) # shape (b,h*w,c) - feature_mul = torch.bmm(feature_B, feature_A) # shape (b,h*w,h*w) - correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2) - return correlation_tensor # shape (b,h*w,h,w) - - -class FeatureL2Norm(nn.Module): - """ - Implementation by Ignacio Rocco - paper: https://arxiv.org/abs/1703.05593 - project: https://github.com/ignacio-rocco/cnngeometric_pytorch - """ - def __init__(self): - super(FeatureL2Norm, self).__init__() - - def forward(self, feature, dim=1): - epsilon = 1e-6 - norm = torch.pow(torch.sum(torch.pow(feature, 2), dim) + epsilon, 
0.5).unsqueeze(dim).expand_as(feature) - return torch.div(feature, norm) - - -class OpticalFlowEstimator(nn.Module): - - def __init__(self, in_channels, batch_norm): - super(OpticalFlowEstimator, self).__init__() - - dd = np.cumsum([128,128,96,64,32]) - self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_1 = conv(in_channels + dd[0], 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_2 = conv(in_channels + dd[1], 96, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_3 = conv(in_channels + dd[2], 64, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_4 = conv(in_channels + dd[3], 32, kernel_size=3, stride=1, batch_norm=batch_norm) - self.predict_flow = predict_flow(in_channels + dd[4]) - - def forward(self, x): - # dense net connection - x = torch.cat((self.conv_0(x), x),1) - x = torch.cat((self.conv_1(x), x),1) - x = torch.cat((self.conv_2(x), x),1) - x = torch.cat((self.conv_3(x), x),1) - x = torch.cat((self.conv_4(x), x),1) - flow = self.predict_flow(x) - return x, flow - - -class OpticalFlowEstimatorNoDenseConnection(nn.Module): - - def __init__(self, in_channels, batch_norm): - super(OpticalFlowEstimatorNoDenseConnection, self).__init__() - self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_1 = conv(128, 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_2 = conv(128, 96, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_3 = conv(96, 64, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_4 = conv(64, 32, kernel_size=3, stride=1, batch_norm=batch_norm) - self.predict_flow = predict_flow(32) - - def forward(self, x): - x = self.conv_4(self.conv_3(self.conv_2(self.conv_1(self.conv_0(x))))) - flow = self.predict_flow(x) - return x, flow - - -# extracted from DGCNet -def conv_blck(in_channels, out_channels, kernel_size=3, - stride=1, padding=1, dilation=1, bn=False): - if bn: - return 
nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation), - nn.BatchNorm2d(out_channels), - nn.ReLU(inplace=True)) - else: - return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation), - nn.ReLU(inplace=True)) - - -def conv_head(in_channels): - return nn.Conv2d(in_channels, 2, kernel_size=3, padding=1) - - -class CorrespondenceMapBase(nn.Module): - def __init__(self, in_channels, bn=False): - super().__init__() - - def forward(self, x1, x2=None, x3=None): - x = x1 - # concatenating dimensions - if (x2 is not None) and (x3 is None): - x = torch.cat((x1, x2), 1) - elif (x2 is None) and (x3 is not None): - x = torch.cat((x1, x3), 1) - elif (x2 is not None) and (x3 is not None): - x = torch.cat((x1, x2, x3), 1) - - return x - - -class CMDTop(CorrespondenceMapBase): - def __init__(self, in_channels, bn=False): - super().__init__(in_channels, bn) - chan = [128, 128, 96, 64, 32] - self.conv0 = conv_blck(in_channels, chan[0], bn=bn) - self.conv1 = conv_blck(chan[0], chan[1], bn=bn) - self.conv2 = conv_blck(chan[1], chan[2], bn=bn) - self.conv3 = conv_blck(chan[2], chan[3], bn=bn) - self.conv4 = conv_blck(chan[3], chan[4], bn=bn) - self.final = conv_head(chan[-1]) - - def forward(self, x1, x2=None, x3=None): - x = super().forward(x1, x2, x3) - x = self.conv4(self.conv3(self.conv2(self.conv1(self.conv0(x))))) - return self.final(x) - - -def warp(x, flo): - """ - warp an image/tensor (im2) back to im1, according to the optical flow - x: [B, C, H, W] (im2) - flo: [B, 2, H, W] flow - """ - B, C, H, W = x.size() - # mesh grid - xx = torch.arange(0, W).view(1, -1).repeat(H, 1) - yy = torch.arange(0, H).view(-1, 1).repeat(1, W) - xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1) - yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1) - grid = torch.cat((xx, yy), 1).float() - - if x.is_cuda: - grid = grid.cuda() - vgrid = grid + flo - # makes a mapping out of the flow - - # scale grid to [-1,1] - vgrid[:, 0, :, :] = 2.0 * 
vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0 - vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0 - - vgrid = vgrid.permute(0, 2, 3, 1) - - if float(torch.__version__[:3]) >= 1.3: - output = nn.functional.grid_sample(x, vgrid, align_corners=True) - else: - output = nn.functional.grid_sample(x, vgrid) - return output \ No newline at end of file diff --git a/sjlee/loss.py b/sjlee/loss.py deleted file mode 100644 index 38807f9..0000000 --- a/sjlee/loss.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch - -def loss_superglue(scores, all_matches): - # check if indexed correctly - loss = [] - for i in range(len(all_matches[0])): - x = all_matches[0][i][0] - y = all_matches[0][i][1] - loss.append(-torch.log( scores[0][x][y].exp() + 1e-7 )) # check batch size == 1 ? - # for p0 in unmatched0: - # loss += -torch.log(scores[0][p0][-1]) - # for p1 in unmatched1: - # loss += -torch.log(scores[0][-1][p1]) - loss_mean = torch.mean(torch.stack(loss)) - loss_mean = torch.reshape(loss_mean, (1, -1)) - return loss_mean[0] diff --git a/sjlee/superglue.py b/sjlee/superglue.py deleted file mode 100644 index 6837d47..0000000 --- a/sjlee/superglue.py +++ /dev/null @@ -1,359 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. 
Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. -# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -import torch -from torch import nn - - -def MLP(channels: list, do_bn=True): - """ Multi-layer perceptron """ - n = len(channels) - layers = [] - for i in range(1, n): - layers.append( - nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n-1): - if do_bn: - # layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.InstanceNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """ Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one*width, one*height])[None] - center = 
size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """ Joint encoding of visual appearance and location using MLPs""" - def __init__(self, feature_dim, layers): - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention(query, key, value): - dim = query.shape[1] - scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """ Multi-head attention to increase model expressivitiy """ - def __init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim = d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward(self, query, key, value): - batch_dim = query.size(0) - query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value))] - x, prob = attention(query, key, value) - self.prob.append(prob) - return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x, source): - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, 
feature_dim: int, layer_names: list): - super().__init__() - self.layers = nn.ModuleList([ - AttentionalPropagation(feature_dim, 4) - for _ in range(len(layer_names))]) - self.names = layer_names - - def forward(self, desc0, desc1): - for layer, name in zip(self.layers, self.names): - layer.attn.prob = [] - if name == 'cross': - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations(Z, log_mu, log_nu, iters: int): - """ Perform Sinkhorn Normalization in Log-space for stability""" - u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport(scores, alpha, iters: int): - """ Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat([torch.cat([scores, bins0], -1), - torch.cat([bins1, alpha], -1)], 1) - - norm = - (ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. 
Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - The correspondence ids use -1 to indicate non-matching points. - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. https://arxiv.org/abs/1911.11763 - """ - default_config = { - 'descriptor_dim': 256, - 'weights': 'indoor', - 'keypoint_encoder': [32, 64, 128, 256], - 'GNN_layers': ['self', 'cross'] * 9, - 'sinkhorn_iterations': 100, - 'match_threshold': 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config['descriptor_dim'], self.config['keypoint_encoder']) - - self.gnn = AttentionalGNN( - self.config['descriptor_dim'], self.config['GNN_layers']) - - self.final_proj = nn.Conv1d( - self.config['descriptor_dim'], self.config['descriptor_dim'], - kernel_size=1, bias=True) - - bin_score = torch.nn.Parameter(torch.tensor(1.)) - self.register_parameter('bin_score', bin_score) - - # assert self.config['weights'] in ['indoor', 'outdoor'] - # path = Path(__file__).parent - # path = path / 'weights/superglue_{}.pth'.format(self.config['weights']) - # self.load_state_dict(torch.load(path)) - # print('Loaded SuperGlue model (\"{}\" weights)'.format( - # self.config['weights'])) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - """ - desc0 = desc0.transpose(0,1) - desc1 = desc1.transpose(0,1) - kpts0 = torch.reshape(kpts0, (1, -1, 2)) - kpts1 = torch.reshape(kpts1, (1, -1, 2)) - """ - - if 
kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int)[0], - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int)[0], - 'matching_scores0': kpts0.new_zeros(shape0)[0], - 'matching_scores1': kpts1.new_zeros(shape1)[0], - 'skip_train': True - } - - """ - file_name = data['file_name'] - all_matches = data['all_matches'].permute(1,2,0) # shape=torch.Size([1, 87, 2]) - """ - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - """ - desc0 = desc0 + self.kenc(kpts0, torch.transpose(data['scores0'], 0, 1)) - desc1 = desc1 + self.kenc(kpts1, torch.transpose(data['scores1'], 0, 1)) - """ - desc0 = desc0 + self.kenc(kpts0, data['scores0']) - desc1 = desc1 + self.kenc(kpts1, data['scores1']) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - # Get the matches with score above "match_threshold". 
- max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - """ - # check if indexed correctly - loss = [] - for i in range(len(all_matches[0])): - x = all_matches[0][i][0] - y = all_matches[0][i][1] - loss.append(-torch.log( scores[0][x][y].exp() )) # check batch size == 1 ? - # for p0 in unmatched0: - # loss += -torch.log(scores[0][p0][-1]) - # for p1 in unmatched1: - # loss += -torch.log(scores[0][-1][p1]) - loss_mean = torch.mean(torch.stack(loss)) - loss_mean = torch.reshape(loss_mean, (1, -1)) - """ - - return { - 'matches0': indices0[0], # use -1 for invalid match - 'matches1': indices1[0], # use -1 for invalid match - 'matching_scores0': mscores0[0], - 'matching_scores1': mscores1[0], - # 'loss': loss_mean[0], - 'skip_train': False - } - - # scores big value or small value means confidence? 
log can't take neg value - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1 - }, - 'superglue': { - 'weights': 'indoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2, - } - } - - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - - print(data['descriptors0'].shape) - superglue = SuperGlue(config.get('superglue', {})) - superglue(data) \ No newline at end of file diff --git a/sjlee/superglue2.py b/sjlee/superglue2.py deleted file mode 100644 index 9db9b6b..0000000 --- a/sjlee/superglue2.py +++ /dev/null @@ -1,330 +0,0 @@ - -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. 
Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. -# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -from typing import List, Tuple - -import torch -from torch import nn - - -def MLP(channels: List[int], do_bn: bool = True) -> nn.Module: - """ Multi-layer perceptron """ - n = len(channels) - layers = [] - for i in range(1, n): - layers.append( - nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n-1): - if do_bn: - layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """ Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one*width, one*height])[None] - 
center = size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """ Joint encoding of visual appearance and location using MLPs""" - def __init__(self, feature_dim: int, layers: List[int]) -> None: - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - dim = query.shape[1] - scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """ Multi-head attention to increase model expressivitiy """ - def __init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim = d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor: - batch_dim = query.size(0) - query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value))] - x, _ = attention(query, key, value) - return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x: torch.Tensor, source: 
torch.Tensor) -> torch.Tensor: - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, feature_dim: int, layer_names: List[str]) -> None: - super().__init__() - self.layers = nn.ModuleList([ - AttentionalPropagation(feature_dim, 4) - for _ in range(len(layer_names))]) - self.names = layer_names - - def forward(self, desc0: torch.Tensor, desc1: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - for layer, name in zip(self.layers, self.names): - if name == 'cross': - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations(Z: torch.Tensor, log_mu: torch.Tensor, log_nu: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Sinkhorn Normalization in Log-space for stability""" - u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat([torch.cat([scores, bins0], -1), - torch.cat([bins1, alpha], -1)], 1) - - norm = - (ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - 
norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - The correspondence ids use -1 to indicate non-matching points. - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. https://arxiv.org/abs/1911.11763 - """ - default_config = { - 'descriptor_dim': 256, - 'weights': 'indoor', - 'keypoint_encoder': [32, 64, 128, 256], - 'GNN_layers': ['self', 'cross'] * 9, - 'sinkhorn_iterations': 100, - 'match_threshold': 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config['descriptor_dim'], self.config['keypoint_encoder']) - - self.gnn = AttentionalGNN( - feature_dim=self.config['descriptor_dim'], layer_names=self.config['GNN_layers']) - - self.final_proj = nn.Conv1d( - self.config['descriptor_dim'], self.config['descriptor_dim'], - kernel_size=1, bias=True) - - bin_score = torch.nn.Parameter(torch.tensor(1.)) - self.register_parameter('bin_score', bin_score) - - assert self.config['weights'] in ['indoor', 'outdoor'] - path = Path(__file__).parent - path = path / 'weights/superglue_{}.pth'.format(self.config['weights']) - self.load_state_dict(torch.load(str(path))) - print('Loaded SuperGlue model (\"{}\" weights)'.format( - self.config['weights'])) - - def forward(self, data): - """Run 
SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int), - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int), - 'matching_scores0': kpts0.new_zeros(shape0), - 'matching_scores1': kpts1.new_zeros(shape1), - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, data['scores0']) - desc1 = desc1 + self.kenc(kpts1, data['scores1']) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - import numpy as np - a = scores[:, :-1, :-1].exp().detach().numpy() - print((a > 1).astype(np.int32).mean()) - - print(scores[:, :-1, :-1].exp().max()) - - # Get the matches with score above "match_threshold". 
- max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - print(scores.shape) - return { - 'matches0': indices0, # use -1 for invalid match - 'matches1': indices1, # use -1 for invalid match - 'matching_scores0': mscores0, - 'matching_scores1': mscores1, - } - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1 - }, - 'superglue': { - 'weights': 'indoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2, - } - } - - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - - superglue = SuperGlue(config.get('superglue', {})) - output = superglue(data) diff --git a/sjlee/superpoint.py b/sjlee/superpoint.py deleted file mode 100644 index 14a07fd..0000000 --- a/sjlee/superpoint.py +++ /dev/null @@ -1,222 +0,0 @@ -# %BANNER_BEGIN% -# 
--------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from pathlib import Path -import torch -from torch import nn - -def simple_nms(scores, nms_radius: int): - """ Fast Non-maximum suppression to remove nearby points """ - assert(nms_radius >= 0) - - def max_pool(x): - return torch.nn.functional.max_pool2d( - x, kernel_size=nms_radius*2+1, stride=1, padding=nms_radius) - - zeros = torch.zeros_like(scores) - max_mask = scores == max_pool(scores) - for _ in range(2): - supp_mask = max_pool(max_mask.float()) > 0 - supp_scores = torch.where(supp_mask, zeros, scores) - new_max_mask = supp_scores == max_pool(supp_scores) - max_mask = max_mask | (new_max_mask & (~supp_mask)) - return torch.where(max_mask, scores, zeros) - - -def remove_borders(keypoints, scores, border: int, height: int, width: int): - """ Removes keypoints too close to the border """ - mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border)) - mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border)) - mask = mask_h & mask_w - return keypoints[mask], scores[mask] - - -def top_k_keypoints(keypoints, scores, k: int): - if k >= len(keypoints): - return keypoints, scores - scores, indices = torch.topk(scores, k, dim=0) - return keypoints[indices], scores - - -def sample_descriptors(keypoints, descriptors, s: int = 8): - """ Interpolate descriptors at keypoint locations """ - b, c, h, w = descriptors.shape - keypoints = keypoints - s / 2 + 0.5 - keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 0.5)], - ).to(keypoints)[None] - keypoints = keypoints*2 - 1 # normalize to (-1, 1) - args = {'align_corners': True} if int(torch.__version__[2]) > 2 else {} - descriptors = torch.nn.functional.grid_sample( - descriptors, keypoints.view(b, 1, -1, 2), 
mode='bilinear', **args) - descriptors = torch.nn.functional.normalize( - descriptors.reshape(b, c, -1), p=2, dim=1) - return descriptors - - -class SuperPoint(nn.Module): - """SuperPoint Convolutional Detector and Descriptor - SuperPoint: Self-Supervised Interest Point Detection and - Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629 - """ - default_config = { - 'descriptor_dim': 256, - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1, - 'remove_borders': 4, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.relu = nn.ReLU(inplace=True) - self.pool = nn.MaxPool2d(kernel_size=2, stride=2) - c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256 - - self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1) - self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) - self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) - self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) - self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) - self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) - self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) - self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) - - self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) - self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) - - self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) - self.convDb = nn.Conv2d( - c5, self.config['descriptor_dim'], - kernel_size=1, stride=1, padding=0) - - path = Path(__file__).parent / 'weights/superpoint_v1.pth' - self.load_state_dict(torch.load(str(path))) - - mk = self.config['max_keypoints'] - if mk == 0 or mk < -1: - raise ValueError('\"max_keypoints\" must be positive or \"-1\"') - - print('Loaded SuperPoint model') - - def forward(self, data): - """ Compute 
keypoints, scores, descriptors for image """ - # Shared Encoder - x = self.relu(self.conv1a(data['image'])) - x = self.relu(self.conv1b(x)) - x = self.pool(x) - x = self.relu(self.conv2a(x)) - x = self.relu(self.conv2b(x)) - x = self.pool(x) - x = self.relu(self.conv3a(x)) - x = self.relu(self.conv3b(x)) - x = self.pool(x) - x = self.relu(self.conv4a(x)) - x = self.relu(self.conv4b(x)) - - # Compute the dense keypoint scores - cPa = self.relu(self.convPa(x)) - scores = self.convPb(cPa) - scores = torch.nn.functional.softmax(scores, 1)[:, :-1] - b, _, h, w = scores.shape - scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8) - scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8) - scores = simple_nms(scores, self.config['nms_radius']) - - # Extract keypoints - keypoints = [ - torch.nonzero(s > self.config['keypoint_threshold']) - for s in scores] - scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)] - - # Discard keypoints near the image borders - keypoints, scores = list(zip(*[ - remove_borders(k, s, self.config['remove_borders'], h*8, w*8) - for k, s in zip(keypoints, scores)])) - - # Keep the k keypoints with highest score - if self.config['max_keypoints'] >= 0: - keypoints, scores = list(zip(*[ - top_k_keypoints(k, s, self.config['max_keypoints']) - for k, s in zip(keypoints, scores)])) - - # Convert (h, w) to (x, y) - keypoints = [torch.flip(k, [1]).float() for k in keypoints] - - # Compute the dense descriptors - cDa = self.relu(self.convDa(x)) - descriptors = self.convDb(cDa) - descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1) - - # Extract descriptors - descriptors = [sample_descriptors(k[None], d[None], 8)[0] - for k, d in zip(keypoints, descriptors)] - - return { - 'keypoints': keypoints, - 'scores': scores, - 'descriptors': descriptors, - } - -if __name__ == '__main__': - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1 - }, - 'superglue': { - 'weights': 
'indoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2, - } - } - - test_img = torch.randn(1, 1, 512, 512) - data = {'image': test_img} - - superpoint = SuperPoint(config.get('superpoint', {})) - output = superpoint(data) - - print(output['keypoints'][0].shape, output['descriptors'][0].shape) \ No newline at end of file diff --git a/sjlee/test.py b/sjlee/test.py deleted file mode 100644 index 3bfdc70..0000000 --- a/sjlee/test.py +++ /dev/null @@ -1,10 +0,0 @@ - -import torch -import numpy as np -from torch.nn.utils.rnn import pad_sequence -import torch.nn.functional as F - -a = torch.randn(3, 4) -print(a.dtype) -a = F.pad(a, (0, 2, 0, 1), 'constant', 0).type(a.dtype) -print(a) \ No newline at end of file diff --git a/sjlee/train_pseudo.py b/sjlee/train_pseudo.py deleted file mode 100644 index 3c09bf4..0000000 --- a/sjlee/train_pseudo.py +++ /dev/null @@ -1,41 +0,0 @@ - -""" -1. config 아래와 같이 설정 -2. weights은 상황에 맞게 indoor, outdoor 설정해주어야 함 -config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': 1024 - }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2 - } - } -""" - -""" -# start training -for epoch in range(1, opt.epoch+1): - epoch_loss = 0 - superglue.double().train() - for i, pred in enumerate(train_loader): - for k in pred: - if k != 'file_name' and k!='image0' and k!='image1': - if type(pred[k]) == torch.Tensor: - pred[k] = Variable(pred[k].cuda()) - else: - pred[k] = Variable(torch.stack(pred[k]).cuda()) - - # =========== new code =============== # - scores, data = superglue(pred) - loss = loss_superglue(scores, data['all_matches'].permute(1, 2, 0)) - - for k, v in pred.items(): - pred[k] = v[0] - pred = {**pred, **data, **{'loss', loss}} - - # ... 
keep going -""" \ No newline at end of file diff --git a/sjlee_backup/IMC.py b/sjlee_backup/IMC.py deleted file mode 100644 index a7cbe25..0000000 --- a/sjlee_backup/IMC.py +++ /dev/null @@ -1,212 +0,0 @@ - -import os -import sys - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import numpy as np -from functools import partial - -from pydoc import source_synopsis -from sjlee_backup.superglue2 import SuperGlue, normalize_keypoints, arange_like, log_optimal_transport -from sjlee_backup.losssuperglue import loss_superglue - -sys.path.append(os.path.join(os.path.dirname(__file__), 'cats')) -from sjlee.cats.cats import TransformerAggregator ########################################################### - -def dfs_freeze(model): - for name, child in model.named_children(): - for param in child.parameters(): - param.requires_grad = False - - dfs_freeze(child) - -def softmax_with_temperature(x, beta=2., d = 1): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - M, _ = x.max(dim=d, keepdim=True) - x = x - M # subtract maximum value for stability - exp_x = torch.exp(x/beta) - exp_x_sum = exp_x.sum(dim=d, keepdim=True) - return exp_x / exp_x_sum - -# positional embedding 필요한가? 
-# M * N 크기가 다 다른 문제 -class SimpleSuperCATs(SuperGlue): - def __init__(self, - config, - feature_size=32, - feature_proj_dim=128, - depth=4, - num_heads=4, - mlp_ratio=4, - ): - super().__init__(config) - - # freeze superglue's layers - dfs_freeze(self.kenc) - dfs_freeze(self.gnn) - dfs_freeze(self.final_proj) - - self.feature_size = feature_size - self.feature_proj_dim = feature_proj_dim - self.decoder_embed_dim = self.feature_size ** 2 - - self.decoder = TransformerAggregator( - img_size=self.feature_size, embed_dim=self.decoder_embed_dim, depth=depth, num_heads=num_heads, - mlp_ratio=mlp_ratio, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), - num_hyperpixel=1 - ) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - with torch.no_grad(): - - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - - - desc0 = desc0.transpose(0,1) - desc1 = desc1.transpose(0,1) - kpts0 = torch.reshape(kpts0, (1, -1, 2)) - kpts1 = torch.reshape(kpts1, (1, -1, 2)) - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return [], { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int)[0], - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int)[0], - 'matching_scores0': kpts0.new_zeros(shape0)[0], - 'matching_scores1': kpts1.new_zeros(shape1)[0], - 'skip_train': True - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, torch.transpose(data['scores0'], 0, 1)) - desc1 = desc1 + self.kenc(kpts1, torch.transpose(data['scores1'], 0, 1)) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. 
- scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - #scores[scores>30.] = 30. - #scores[scores<-80.] = -80. - #print(scores.max(), scores.min()) - - b, m, n = scores.shape - max_keypoints = self.feature_size ** 2 - if m + n < max_keypoints *2: - p2d = (0, max_keypoints-n, 0, max_keypoints-m) - scores = F.pad(scores, p2d, 'constant', 0.).type(scores.dtype) - - #print(scores.max(), scores.min()) - scores = self.decoder(scores[:, None, :, :]) - - scores = (softmax_with_temperature(scores)) - - - - #scores = self.decoder(scores[:, None, :, :]) - #print(scores.max(), scores.min()) - scores = scores[:, :m, :n] - #print(scores.max(), scores.min()) - - # Run the optimal transport. - ''' - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - ''' - # Get the matches with score above "match_threshold". - max0, max1 = scores[:, :, :].max(2), scores[:, :, :].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1 , 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values, zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - #print(mscores0.min(), mscores0.max()) - #print(mscores0) - - return scores, { - 'matches0': indices0[0], # use -1 for invalid match - 'matches1': indices1[0], # use -1 for invalid match - 'matching_scores0': mscores0[0], - 'matching_scores1': mscores1[0], - 'skip_train': False - } - - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 
'keypoint_threshold': 0.005, - 'max_keypoints': 1024 - }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2 - } - } - - """ - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - """ - - pred = { - 'keypoints0' : torch.randn(1, 1, 484, 2), - 'keypoints1' : torch.randn(1, 1, 484, 2), - 'descriptors0' : torch.randn(256, 1, 484), - 'descriptors1' : torch.randn(256, 1, 484), - 'scores0' : torch.randn(484, 1), - 'scores1' : torch.randn(484, 1), - 'image0' : torch.randn(1, 1, 512, 512), - 'image1' : torch.randn(1, 1, 512, 512), - # 'all_matches' : torch.randn(2, 1, 1248) - } - - superglue = SimpleSuperCATs(config.get('superglue', {})) - scores, output = superglue(pred) - - # loss = loss_superglue(scores, pred['all_matches'].permute(1, 2, 0)) - # print(loss) \ No newline at end of file diff --git a/sjlee_backup/IMCsuperglue.py b/sjlee_backup/IMCsuperglue.py deleted file mode 100644 index b7eb471..0000000 --- a/sjlee_backup/IMCsuperglue.py +++ /dev/null @@ -1,192 +0,0 @@ - -import os -import sys - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import numpy as np -from functools import partial - -from pydoc import source_synopsis -from sjlee_backup.superglue2 import SuperGlue, normalize_keypoints, arange_like, log_optimal_transport -from sjlee_backup.losssuperglue import loss_superglue - -sys.path.append(os.path.join(os.path.dirname(__file__), 'cats')) -from cats import TransformerAggregator - -def dfs_freeze(model): - for name, child in 
model.named_children(): - for param in child.parameters(): - param.requires_grad = False - - dfs_freeze(child) - -def softmax_with_temperature(x, beta=2., d = 1): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - M, _ = x.max(dim=d, keepdim=True) - x = x - M # subtract maximum value for stability - exp_x = torch.exp(x/beta) - exp_x_sum = exp_x.sum(dim=d, keepdim=True) - return exp_x / exp_x_sum - -# positional embedding 필요한가? -# M * N 크기가 다 다른 문제 -class SimpleSuperCATs(SuperGlue): - def __init__(self, - config, - feature_size=32, - feature_proj_dim=128, - depth=4, - num_heads=4, - mlp_ratio=4, - ): - super().__init__(config) - - # freeze superglue's layers - dfs_freeze(self.kenc) - dfs_freeze(self.gnn) - dfs_freeze(self.final_proj) - - self.feature_size = feature_size - self.feature_proj_dim = feature_proj_dim - self.decoder_embed_dim = self.feature_size ** 2 - - self.decoder = TransformerAggregator( - img_size=self.feature_size, embed_dim=self.decoder_embed_dim, depth=depth, num_heads=num_heads, - mlp_ratio=mlp_ratio, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), - num_hyperpixel=1 - ) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - with torch.no_grad(): - - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - - - desc0 = desc0.transpose(0,1) - desc1 = desc1.transpose(0,1) - kpts0 = torch.reshape(kpts0, (1, -1, 2)) - kpts1 = torch.reshape(kpts1, (1, -1, 2)) - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return [], { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int)[0], - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int)[0], - 'matching_scores0': kpts0.new_zeros(shape0)[0], - 'matching_scores1': kpts1.new_zeros(shape1)[0], - 'skip_train': True - } - - # Keypoint normalization. 
- kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, torch.transpose(data['scores0'], 0, 1)) - desc1 = desc1 + self.kenc(kpts1, torch.transpose(data['scores1'], 0, 1)) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - #print(scores.max(), scores.min()) - - # Run the optimal transport. - - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - # Get the matches with score above "match_threshold". - max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1 , 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - #print(mscores0.min(), mscores0.max()) - #print(mscores0) - - return scores, { - 'matches0': indices0[0], # use -1 for invalid match - 'matches1': indices1[0], # use -1 for invalid match - 'matching_scores0': mscores0[0], - 'matching_scores1': mscores1[0], - 'skip_train': False - } - - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': 1024 
- }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2 - } - } - - """ - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - """ - - pred = { - 'keypoints0' : torch.randn(1, 1, 484, 2), - 'keypoints1' : torch.randn(1, 1, 484, 2), - 'descriptors0' : torch.randn(256, 1, 484), - 'descriptors1' : torch.randn(256, 1, 484), - 'scores0' : torch.randn(484, 1), - 'scores1' : torch.randn(484, 1), - 'image0' : torch.randn(1, 1, 512, 512), - 'image1' : torch.randn(1, 1, 512, 512), - # 'all_matches' : torch.randn(2, 1, 1248) - } - - superglue = SimpleSuperCATs(config.get('superglue', {})) - scores, output = superglue(pred) - - # loss = loss_superglue(scores, pred['all_matches'].permute(1, 2, 0)) - # print(loss) \ No newline at end of file diff --git "a/sjlee_backup/IMC\353\202\230\354\244\221\354\227\220.py" "b/sjlee_backup/IMC\353\202\230\354\244\221\354\227\220.py" deleted file mode 100644 index 81129e1..0000000 --- "a/sjlee_backup/IMC\353\202\230\354\244\221\354\227\220.py" +++ /dev/null @@ -1,221 +0,0 @@ - -import os -import sys - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import numpy as np -from functools import partial - -from pydoc import source_synopsis -from sjlee_backup.superglue2 import SuperGlue, normalize_keypoints, arange_like, log_optimal_transport -from sjlee_backup.loss import loss_superglue - -sys.path.append(os.path.join(os.path.dirname(__file__), 'cats')) -from cats import TransformerAggregator - -def dfs_freeze(model): - for name, 
child in model.named_children(): - for param in child.parameters(): - param.requires_grad = False - - dfs_freeze(child) - -def softmax_with_temperature(x, beta=2., d = 1): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - M, _ = x.max(dim=d, keepdim=True) - x = x - M # subtract maximum value for stability - exp_x = torch.exp(x/beta) - exp_x_sum = exp_x.sum(dim=d, keepdim=True) - return exp_x / exp_x_sum - -# positional embedding 필요한가? -# M * N 크기가 다 다른 문제 -class SimpleSuperCATs(SuperGlue): - def __init__(self, - config, - feature_size=32, - feature_proj_dim=128, - depth=4, - num_heads=4, - mlp_ratio=4, - ): - super().__init__(config) - - # freeze superglue's layers - dfs_freeze(self.kenc) - dfs_freeze(self.gnn) - dfs_freeze(self.final_proj) - - self.feature_size = feature_size - self.feature_proj_dim = feature_proj_dim - self.decoder_embed_dim = self.feature_size ** 2 - - self.decoder = TransformerAggregator( - img_size=self.feature_size, embed_dim=self.decoder_embed_dim, depth=depth, num_heads=num_heads, - mlp_ratio=mlp_ratio, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), - num_hyperpixel=1 - ) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - with torch.no_grad(): - - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - - - desc0 = desc0.transpose(0,1) - desc1 = desc1.transpose(0,1) - kpts0 = torch.reshape(kpts0, (1, -1, 2)) - kpts1 = torch.reshape(kpts1, (1, -1, 2)) - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return [], { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int)[0], - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int)[0], - 'matching_scores0': kpts0.new_zeros(shape0)[0], - 'matching_scores1': kpts1.new_zeros(shape1)[0], - 'skip_train': True - } - - - - # Keypoint normalization. 
- kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, torch.transpose(data['scores0'], 0, 1)) - desc1 = desc1 + self.kenc(kpts1, torch.transpose(data['scores1'], 0, 1)) - - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - - - b, m, n = scores.shape - max_keypoints = self.feature_size ** 2 - if m + n < max_keypoints *2: - p2d = (0, max_keypoints-n, 0, max_keypoints-m) - scores = F.pad(scores, p2d, 'constant', 0.).type(scores.dtype) - - - scores = self.decoder(scores[:, None, :, :]) - scores = scores[:, :m, :n] - - #print(scores) - thr = 80. - scores[scores<-thr] = -thr - scores[scores>thr] = thr - #print(scores) - scores = (softmax_with_temperature(scores)) - - - # Run the optimal transport. - ''' - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - scores[scores<-100.] = -100. - scores = scores[:, :-1, :-1].exp() - ''' - - #print(scores) - - #print(scores.min(), scores.max()) - #print(scores.exp().min(), scores.exp().max()) - - # Get the matches with score above "match_threshold". 
- max0, max1 = scores[:, :, :].max(2), scores[:, :, :].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1 , 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values, zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - #print(mscores0.min(), mscores0.max()) - #print(mscores0) - - return scores, { - 'matches0': indices0[0], # use -1 for invalid match - 'matches1': indices1[0], # use -1 for invalid match - 'matching_scores0': mscores0[0], - 'matching_scores1': mscores1[0], - 'skip_train': False - } - - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': 1024 - }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2 - } - } - - """ - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - """ - - pred = { - 'keypoints0' : torch.randn(1, 1, 484, 2), - 'keypoints1' : torch.randn(1, 1, 484, 2), - 'descriptors0' : torch.randn(256, 1, 484), - 'descriptors1' : torch.randn(256, 1, 484), - 'scores0' : torch.randn(484, 1), - 'scores1' 
: torch.randn(484, 1), - 'image0' : torch.randn(1, 1, 512, 512), - 'image1' : torch.randn(1, 1, 512, 512), - # 'all_matches' : torch.randn(2, 1, 1248) - } - - superglue = SimpleSuperCATs(config.get('superglue', {})) - scores, output = superglue(pred) - - # loss = loss_superglue(scores, pred['all_matches'].permute(1, 2, 0)) - # print(loss) \ No newline at end of file diff --git a/sjlee_backup/__pycache__/IMC.cpython-38.pyc b/sjlee_backup/__pycache__/IMC.cpython-38.pyc deleted file mode 100644 index a70c1a1..0000000 Binary files a/sjlee_backup/__pycache__/IMC.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/IMC_backup.cpython-38.pyc b/sjlee_backup/__pycache__/IMC_backup.cpython-38.pyc deleted file mode 100644 index 6d0a596..0000000 Binary files a/sjlee_backup/__pycache__/IMC_backup.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/IMCcopy.cpython-38.pyc b/sjlee_backup/__pycache__/IMCcopy.cpython-38.pyc deleted file mode 100644 index 4adcea9..0000000 Binary files a/sjlee_backup/__pycache__/IMCcopy.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/IMCsuperglue.cpython-38.pyc b/sjlee_backup/__pycache__/IMCsuperglue.cpython-38.pyc deleted file mode 100644 index 1cee602..0000000 Binary files a/sjlee_backup/__pycache__/IMCsuperglue.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/loss.cpython-38.pyc b/sjlee_backup/__pycache__/loss.cpython-38.pyc deleted file mode 100644 index 57d4a2b..0000000 Binary files a/sjlee_backup/__pycache__/loss.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/losssuperglue.cpython-38.pyc b/sjlee_backup/__pycache__/losssuperglue.cpython-38.pyc deleted file mode 100644 index 81b596e..0000000 Binary files a/sjlee_backup/__pycache__/losssuperglue.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/superglue.cpython-38.pyc b/sjlee_backup/__pycache__/superglue.cpython-38.pyc deleted file mode 100644 index 
3acd4d8..0000000 Binary files a/sjlee_backup/__pycache__/superglue.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/superglue2.cpython-38.pyc b/sjlee_backup/__pycache__/superglue2.cpython-38.pyc deleted file mode 100644 index 8dccc4e..0000000 Binary files a/sjlee_backup/__pycache__/superglue2.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/__pycache__/superpoint.cpython-38.pyc b/sjlee_backup/__pycache__/superpoint.cpython-38.pyc deleted file mode 100644 index 262ba3e..0000000 Binary files a/sjlee_backup/__pycache__/superpoint.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/cats/__pycache__/cats.cpython-38.pyc b/sjlee_backup/cats/__pycache__/cats.cpython-38.pyc deleted file mode 100644 index 0ce884b..0000000 Binary files a/sjlee_backup/cats/__pycache__/cats.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/cats/__pycache__/cats.cpython-39.pyc b/sjlee_backup/cats/__pycache__/cats.cpython-39.pyc deleted file mode 100644 index 5711968..0000000 Binary files a/sjlee_backup/cats/__pycache__/cats.cpython-39.pyc and /dev/null differ diff --git a/sjlee_backup/cats/__pycache__/mod.cpython-38.pyc b/sjlee_backup/cats/__pycache__/mod.cpython-38.pyc deleted file mode 100644 index 23b810b..0000000 Binary files a/sjlee_backup/cats/__pycache__/mod.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/cats/cats.py b/sjlee_backup/cats/cats.py deleted file mode 100644 index e85d793..0000000 --- a/sjlee_backup/cats/cats.py +++ /dev/null @@ -1,404 +0,0 @@ -import os -import sys -from operator import add -from functools import reduce, partial - -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np - -import torchvision.models as models - -from feature_backbones import resnet -from mod import FeatureL2Norm, unnormalise_and_convert_mapping_to_flow - -''' -Modified timm library Vision Transformer implementation -https://github.com/rwightman/pytorch-image-models -''' - -# 
================= timm functions START ================= # - -import math -import warnings - -def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, - the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for - changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use - 'survival rate' as the argument. - """ - if drop_prob == 0. or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = x.new_empty(shape).bernoulli_(keep_prob) - if keep_prob > 0.0 and scale_by_keep: - random_tensor.div_(keep_prob) - return x * random_tensor - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - # Cut & paste from PyTorch official master until it's in a few official releases - RW - # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " - "The distribution of values may be incorrect.", - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - l = norm_cdf((a - mean) / std) - u = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. 
- tensor.uniform_(2 * l - 1, 2 * u - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - # type: (Tensor, float, float, float, float) -> Tensor - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. - Args: - tensor: an n-dimensional `torch.Tensor` - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - a: the minimum cutoff value - b: the maximum cutoff value - Examples: - >>> w = torch.empty(3, 5) - >>> nn.init.trunc_normal_(w) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) - -# ================= timm functions END================= # - - - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # 
NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class MultiscaleBlock(nn.Module): - - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - self.attn_multiscale = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm1 = norm_layer(dim) - self.norm2 = norm_layer(dim) - self.norm3 = norm_layer(dim) - self.norm4 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - self.mlp2 = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - ''' - Multi-level aggregation - ''' - B, N, H, W = x.shape - if N == 1: - x = x.flatten(0, 1) - a = self.norm1(x) - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x.view(B, N, H, W) - x = x.flatten(0, 1) - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp2(self.norm4(x))) - x = x.view(B, N, H, W).transpose(1, 2).flatten(0, 1) - x = x + self.drop_path(self.attn_multiscale(self.norm3(x))) - x = x.view(B, H, N, W).transpose(1, 2).flatten(0, 1) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.view(B, N, H, W) - return x - - -class TransformerAggregator(nn.Module): - def __init__(self, num_hyperpixel, img_size=224, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None): - super().__init__() - self.img_size = img_size - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.pos_embed_x = nn.Parameter(torch.zeros(1, num_hyperpixel, 1, img_size, embed_dim // 2)) - self.pos_embed_y = nn.Parameter(torch.zeros(1, num_hyperpixel, img_size, 1, embed_dim // 2)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.Sequential(*[ - MultiscaleBlock( - dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, 
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(depth)]) - - self.proj = nn.Linear(embed_dim, img_size ** 2) - self.norm = norm_layer(embed_dim) - - trunc_normal_(self.pos_embed_x, std=.02) - trunc_normal_(self.pos_embed_y, std=.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def forward(self, corr): - B = corr.shape[0] - x = corr.clone() - - pos_embed = torch.cat((self.pos_embed_x.repeat(1, 1, self.img_size, 1, 1), self.pos_embed_y.repeat(1, 1, 1, self.img_size, 1)), dim=4) - pos_embed = pos_embed.flatten(2, 3) - - x = x.transpose(-1, -2) + pos_embed - x = self.proj(self.blocks(x)).transpose(-1, -2) + corr # swapping the axis for swapping self-attention. - - x = x + pos_embed - x = self.proj(self.blocks(x)) + corr - - return x.mean(1) - - -class FeatureExtractionHyperPixel(nn.Module): - def __init__(self, hyperpixel_ids, feature_size, freeze=True): - super().__init__() - self.backbone = resnet.resnet101(pretrained=True) - self.feature_size = feature_size - if freeze: - for param in self.backbone.parameters(): - param.requires_grad = False - nbottlenecks = [3, 4, 23, 3] - self.bottleneck_ids = reduce(add, list(map(lambda x: list(range(x)), nbottlenecks))) - self.layer_ids = reduce(add, [[i + 1] * x for i, x in enumerate(nbottlenecks)]) - self.hyperpixel_ids = hyperpixel_ids - - - def forward(self, img): - r"""Extract desired a list of intermediate features""" - - feats = [] - - # Layer 0 - feat = self.backbone.conv1.forward(img) - feat = self.backbone.bn1.forward(feat) - feat = self.backbone.relu.forward(feat) - feat = self.backbone.maxpool.forward(feat) - if 0 in self.hyperpixel_ids: - feats.append(feat.clone()) - - # Layer 1-4 - for hid, (bid, lid) in 
enumerate(zip(self.bottleneck_ids, self.layer_ids)): - res = feat - feat = self.backbone.__getattr__('layer%d' % lid)[bid].conv1.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].bn1.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].conv2.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].bn2.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].conv3.forward(feat) - feat = self.backbone.__getattr__('layer%d' % lid)[bid].bn3.forward(feat) - - if bid == 0: - res = self.backbone.__getattr__('layer%d' % lid)[bid].downsample.forward(res) - - feat += res - - if hid + 1 in self.hyperpixel_ids: - feats.append(feat.clone()) - #if hid + 1 == max(self.hyperpixel_ids): - # break - feat = self.backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat) - - # Up-sample & concatenate features to construct a hyperimage - - """ - for idx, feat in enumerate(feats): - feats[idx] = F.interpolate(feat, self.feature_size, None, 'bilinear', True) - """ - - return feats - - -class CATs(nn.Module): - def __init__(self, - feature_size=16, - feature_proj_dim=128, - depth=4, - num_heads=6, - mlp_ratio=4, - hyperpixel_ids=[0,8,20,21,26,28,29,30], - freeze=True): - super().__init__() - self.feature_size = feature_size - self.feature_proj_dim = feature_proj_dim - self.decoder_embed_dim = self.feature_size ** 2 + self.feature_proj_dim - - channels = [64] + [256] * 3 + [512] * 4 + [1024] * 23 + [2048] * 3 - - self.feature_extraction = FeatureExtractionHyperPixel(hyperpixel_ids, feature_size, freeze) - self.proj = nn.ModuleList([ - nn.Linear(channels[i], self.feature_proj_dim) for i in hyperpixel_ids - ]) - - self.decoder = TransformerAggregator( - img_size=self.feature_size, embed_dim=self.decoder_embed_dim, depth=depth, num_heads=num_heads, - mlp_ratio=mlp_ratio, 
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), - num_hyperpixel=len(hyperpixel_ids)) - - self.l2norm = FeatureL2Norm() - - self.x_normal = np.linspace(-1,1,self.feature_size) - self.x_normal = nn.Parameter(torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False)) - self.y_normal = np.linspace(-1,1,self.feature_size) - self.y_normal = nn.Parameter(torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False)) - - def softmax_with_temperature(self, x, beta, d = 1): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - M, _ = x.max(dim=d, keepdim=True) - x = x - M # subtract maximum value for stability - exp_x = torch.exp(x/beta) - exp_x_sum = exp_x.sum(dim=d, keepdim=True) - return exp_x / exp_x_sum - - def soft_argmax(self, corr, beta=0.02): - r'''SFNet: Learning Object-aware Semantic Flow (Lee et al.)''' - b,_,h,w = corr.size() - - corr = self.softmax_with_temperature(corr, beta=beta, d=1) - corr = corr.view(-1,h,w,h,w) # (target hxw) x (source hxw) - - grid_x = corr.sum(dim=1, keepdim=False) # marginalize to x-coord. - x_normal = self.x_normal.expand(b,w) - x_normal = x_normal.view(b,w,1,1) - grid_x = (grid_x*x_normal).sum(dim=1, keepdim=True) # b x 1 x h x w - - grid_y = corr.sum(dim=2, keepdim=False) # marginalize to y-coord. - y_normal = self.y_normal.expand(b,h) - y_normal = y_normal.view(b,h,1,1) - grid_y = (grid_y*y_normal).sum(dim=1, keepdim=True) # b x 1 x h x w - return grid_x, grid_y - - def mutual_nn_filter(self, correlation_matrix): - r"""Mutual nearest neighbor filtering (Rocco et al. 
NeurIPS'18)""" - corr_src_max = torch.max(correlation_matrix, dim=3, keepdim=True)[0] - corr_trg_max = torch.max(correlation_matrix, dim=2, keepdim=True)[0] - corr_src_max[corr_src_max == 0] += 1e-30 - corr_trg_max[corr_trg_max == 0] += 1e-30 - - corr_src = correlation_matrix / corr_src_max - corr_trg = correlation_matrix / corr_trg_max - - return correlation_matrix * (corr_src * corr_trg) - - def corr(self, src, trg): - return src.flatten(2).transpose(-1, -2) @ trg.flatten(2) - - def forward(self, target, source): - B, _, H, W = target.size() - - src_feats = self.feature_extraction(source) - tgt_feats = self.feature_extraction(target) - - corrs = [] - src_feats_proj = [] - tgt_feats_proj = [] - for i, (src, tgt) in enumerate(zip(src_feats, tgt_feats)): - corr = self.corr(self.l2norm(src), self.l2norm(tgt)) - corrs.append(corr) - src_feats_proj.append(self.proj[i](src.flatten(2).transpose(-1, -2))) - tgt_feats_proj.append(self.proj[i](tgt.flatten(2).transpose(-1, -2))) - - src_feats = torch.stack(src_feats_proj, dim=1) - tgt_feats = torch.stack(tgt_feats_proj, dim=1) - corr = torch.stack(corrs, dim=1) - - corr = self.mutual_nn_filter(corr) - - refined_corr = self.decoder(corr, src_feats, tgt_feats) - - grid_x, grid_y = self.soft_argmax(refined_corr.view(B, -1, self.feature_size, self.feature_size)) - - flow = torch.cat((grid_x, grid_y), dim=1) - flow = unnormalise_and_convert_mapping_to_flow(flow) - - return flow diff --git a/sjlee_backup/cats/feature_backbones/__pycache__/resnet.cpython-38.pyc b/sjlee_backup/cats/feature_backbones/__pycache__/resnet.cpython-38.pyc deleted file mode 100644 index 29a0c6e..0000000 Binary files a/sjlee_backup/cats/feature_backbones/__pycache__/resnet.cpython-38.pyc and /dev/null differ diff --git a/sjlee_backup/cats/feature_backbones/resnet.py b/sjlee_backup/cats/feature_backbones/resnet.py deleted file mode 100644 index 2c94e68..0000000 --- a/sjlee_backup/cats/feature_backbones/resnet.py +++ /dev/null @@ -1,342 +0,0 @@ -import torch 
-import torch.nn as nn -#from .utils import load_state_dict_from_url -try: - from torch.hub import load_state_dict_from_url -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url - - -__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', - 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', - 'wide_resnet50_2', 'wide_resnet101_2'] - - -model_urls = { - 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', - 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', - 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', - 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', - 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', - 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', - 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', - 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', - 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', -} - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=dilation, groups=groups, bias=False, dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - __constants__ = ['downsample'] - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None): - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise 
NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - __constants__ = ['downsample'] - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None): - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, - groups=1, width_per_group=64, 
replace_stride_with_dilation=None, - norm_layer=None): - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2, - dilate=replace_stride_with_dilation[2]) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
- # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=self.groups, - base_width=self.base_width, dilation=self.dilation, - norm_layer=norm_layer)) - - return nn.Sequential(*layers) - - def _forward(self, x): - x = self.conv1(x) - print(x.shape) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = torch.flatten(x, 1) - x = self.fc(x) - - return x - - # Allow for accessing forward method in a inherited class - forward = _forward - - -def _resnet(arch, block, layers, pretrained, progress, **kwargs): - model = ResNet(block, layers, **kwargs) - if pretrained: - state_dict = load_state_dict_from_url(model_urls[arch], - progress=progress) - model.load_state_dict(state_dict) - return model - - -def resnet18(pretrained=False, progress=True, **kwargs): - r"""ResNet-18 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the 
download to stderr - """ - return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, - **kwargs) - - -def resnet34(pretrained=False, progress=True, **kwargs): - r"""ResNet-34 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, - **kwargs) - - -def resnet50(pretrained=False, progress=True, **kwargs): - r"""ResNet-50 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, - **kwargs) - - -def resnet101(pretrained=False, progress=True, **kwargs): - r"""ResNet-101 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, - **kwargs) - - -def resnet152(pretrained=False, progress=True, **kwargs): - r"""ResNet-152 model from - `"Deep Residual Learning for Image Recognition" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, - **kwargs) - - -def resnext50_32x4d(pretrained=False, progress=True, **kwargs): - r"""ResNeXt-50 32x4d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of 
the download to stderr - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 4 - return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], - pretrained, progress, **kwargs) - - -def resnext101_32x8d(pretrained=False, progress=True, **kwargs): - r"""ResNeXt-101 32x8d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 8 - return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], - pretrained, progress, **kwargs) - - -def wide_resnet50_2(pretrained=False, progress=True, **kwargs): - r"""Wide ResNet-50-2 model from - `"Wide Residual Networks" `_ - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], - pretrained, progress, **kwargs) - - -def wide_resnet101_2(pretrained=False, progress=True, **kwargs): - r"""Wide ResNet-101-2 model from - `"Wide Residual Networks" `_ - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
- Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], - pretrained, progress, **kwargs) \ No newline at end of file diff --git a/sjlee_backup/cats/mod.py b/sjlee_backup/cats/mod.py deleted file mode 100644 index 7ce21fa..0000000 --- a/sjlee_backup/cats/mod.py +++ /dev/null @@ -1,213 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -from torch.autograd import Variable - -r''' -Copy-pasted from GLU-Net -https://github.com/PruneTruong/GLU-Net -''' - - -def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, batch_norm=False): - if batch_norm: - return nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, bias=True), - nn.BatchNorm2d(out_planes), - nn.LeakyReLU(0.1, inplace=True)) - else: - return nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, bias=True), - nn.LeakyReLU(0.1)) - - -def predict_flow(in_planes): - return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True) - - -def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1): - return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True) - - -def unnormalise_and_convert_mapping_to_flow(map): - # here map is normalised to -1;1 - # we put it back to 0,W-1, then convert it to flow - B, C, H, W = map.size() - mapping = torch.zeros_like(map) - # mesh grid - mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise - mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise - - xx = torch.arange(0, W).view(1,-1).repeat(H,1) - yy = torch.arange(0, H).view(-1,1).repeat(1,W) - xx = xx.view(1,1,H,W).repeat(B,1,1,1) - 
yy = yy.view(1,1,H,W).repeat(B,1,1,1) - grid = torch.cat((xx,yy),1).float() - - if mapping.is_cuda: - grid = grid.cuda() - flow = mapping - grid - return flow - - -class CorrelationVolume(nn.Module): - """ - Implementation by Ignacio Rocco - paper: https://arxiv.org/abs/1703.05593 - project: https://github.com/ignacio-rocco/cnngeometric_pytorch - """ - - def __init__(self): - super(CorrelationVolume, self).__init__() - - def forward(self, feature_A, feature_B): - b, c, h, w = feature_A.size() - - # reshape features for matrix multiplication - feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) # shape (b,c,h*w) - feature_B = feature_B.view(b, c, h * w).transpose(1, 2) # shape (b,h*w,c) - feature_mul = torch.bmm(feature_B, feature_A) # shape (b,h*w,h*w) - correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2) - return correlation_tensor # shape (b,h*w,h,w) - - -class FeatureL2Norm(nn.Module): - """ - Implementation by Ignacio Rocco - paper: https://arxiv.org/abs/1703.05593 - project: https://github.com/ignacio-rocco/cnngeometric_pytorch - """ - def __init__(self): - super(FeatureL2Norm, self).__init__() - - def forward(self, feature, dim=1): - epsilon = 1e-6 - norm = torch.pow(torch.sum(torch.pow(feature, 2), dim) + epsilon, 0.5).unsqueeze(dim).expand_as(feature) - return torch.div(feature, norm) - - -class OpticalFlowEstimator(nn.Module): - - def __init__(self, in_channels, batch_norm): - super(OpticalFlowEstimator, self).__init__() - - dd = np.cumsum([128,128,96,64,32]) - self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_1 = conv(in_channels + dd[0], 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_2 = conv(in_channels + dd[1], 96, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_3 = conv(in_channels + dd[2], 64, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_4 = conv(in_channels + dd[3], 32, kernel_size=3, stride=1, 
batch_norm=batch_norm) - self.predict_flow = predict_flow(in_channels + dd[4]) - - def forward(self, x): - # dense net connection - x = torch.cat((self.conv_0(x), x),1) - x = torch.cat((self.conv_1(x), x),1) - x = torch.cat((self.conv_2(x), x),1) - x = torch.cat((self.conv_3(x), x),1) - x = torch.cat((self.conv_4(x), x),1) - flow = self.predict_flow(x) - return x, flow - - -class OpticalFlowEstimatorNoDenseConnection(nn.Module): - - def __init__(self, in_channels, batch_norm): - super(OpticalFlowEstimatorNoDenseConnection, self).__init__() - self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_1 = conv(128, 128, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_2 = conv(128, 96, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_3 = conv(96, 64, kernel_size=3, stride=1, batch_norm=batch_norm) - self.conv_4 = conv(64, 32, kernel_size=3, stride=1, batch_norm=batch_norm) - self.predict_flow = predict_flow(32) - - def forward(self, x): - x = self.conv_4(self.conv_3(self.conv_2(self.conv_1(self.conv_0(x))))) - flow = self.predict_flow(x) - return x, flow - - -# extracted from DGCNet -def conv_blck(in_channels, out_channels, kernel_size=3, - stride=1, padding=1, dilation=1, bn=False): - if bn: - return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation), - nn.BatchNorm2d(out_channels), - nn.ReLU(inplace=True)) - else: - return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation), - nn.ReLU(inplace=True)) - - -def conv_head(in_channels): - return nn.Conv2d(in_channels, 2, kernel_size=3, padding=1) - - -class CorrespondenceMapBase(nn.Module): - def __init__(self, in_channels, bn=False): - super().__init__() - - def forward(self, x1, x2=None, x3=None): - x = x1 - # concatenating dimensions - if (x2 is not None) and (x3 is None): - x = torch.cat((x1, x2), 1) - elif (x2 is None) and (x3 is not None): - x = torch.cat((x1, x3), 1) - 
elif (x2 is not None) and (x3 is not None): - x = torch.cat((x1, x2, x3), 1) - - return x - - -class CMDTop(CorrespondenceMapBase): - def __init__(self, in_channels, bn=False): - super().__init__(in_channels, bn) - chan = [128, 128, 96, 64, 32] - self.conv0 = conv_blck(in_channels, chan[0], bn=bn) - self.conv1 = conv_blck(chan[0], chan[1], bn=bn) - self.conv2 = conv_blck(chan[1], chan[2], bn=bn) - self.conv3 = conv_blck(chan[2], chan[3], bn=bn) - self.conv4 = conv_blck(chan[3], chan[4], bn=bn) - self.final = conv_head(chan[-1]) - - def forward(self, x1, x2=None, x3=None): - x = super().forward(x1, x2, x3) - x = self.conv4(self.conv3(self.conv2(self.conv1(self.conv0(x))))) - return self.final(x) - - -def warp(x, flo): - """ - warp an image/tensor (im2) back to im1, according to the optical flow - x: [B, C, H, W] (im2) - flo: [B, 2, H, W] flow - """ - B, C, H, W = x.size() - # mesh grid - xx = torch.arange(0, W).view(1, -1).repeat(H, 1) - yy = torch.arange(0, H).view(-1, 1).repeat(1, W) - xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1) - yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1) - grid = torch.cat((xx, yy), 1).float() - - if x.is_cuda: - grid = grid.cuda() - vgrid = grid + flo - # makes a mapping out of the flow - - # scale grid to [-1,1] - vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0 - vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0 - - vgrid = vgrid.permute(0, 2, 3, 1) - - if float(torch.__version__[:3]) >= 1.3: - output = nn.functional.grid_sample(x, vgrid, align_corners=True) - else: - output = nn.functional.grid_sample(x, vgrid) - return output \ No newline at end of file diff --git a/sjlee_backup/loss.py b/sjlee_backup/loss.py deleted file mode 100644 index 6edbb37..0000000 --- a/sjlee_backup/loss.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch - -def loss_superglue(scores, all_matches): - # check if indexed correctly - loss = [] - loss.append(torch.tensor(0.).cuda()) - for i in range(len(all_matches[0])): - 
x = all_matches[0][i][0] - y = all_matches[0][i][1] - if x>=len(scores[0]) or y>=len(scores[0][0]):continue - - loss.append(-torch.log( scores[0][x][y] )) # check batch size == 1 ? - # for p0 in unmatched0: - # loss += -torch.log(scores[0][p0][-1]) - # for p1 in unmatched1: - # loss += -torch.log(scores[0][-1][p1]) - loss_mean = torch.mean(torch.stack(loss)) - loss_mean = torch.reshape(loss_mean, (1, -1)) - return loss_mean[0] diff --git a/sjlee_backup/losssuperglue.py b/sjlee_backup/losssuperglue.py deleted file mode 100644 index cecca4e..0000000 --- a/sjlee_backup/losssuperglue.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch - -def loss_superglue(scores, all_matches): - # check if indexed correctly - loss = [] - loss.append(torch.tensor(0.).cuda()) - for i in range(len(all_matches[0])): - x = all_matches[0][i][0] - y = all_matches[0][i][1] - - if x>=len(scores[0]) or y>=len(scores[0][0]):continue - loss.append(-torch.log( scores[0][x][y] )) # check batch size == 1 ? - # for p0 in unmatched0: - # loss += -torch.log(scores[0][p0][-1]) - # for p1 in unmatched1: - # loss += -torch.log(scores[0][-1][p1]) - loss_mean = torch.mean(torch.stack(loss)) - loss_mean = torch.reshape(loss_mean, (1, -1)) - return loss_mean[0] diff --git a/sjlee_backup/superglue.py b/sjlee_backup/superglue.py deleted file mode 100644 index 6837d47..0000000 --- a/sjlee_backup/superglue.py +++ /dev/null @@ -1,359 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. 
Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -import torch -from torch import nn - - -def MLP(channels: list, do_bn=True): - """ Multi-layer perceptron """ - n = len(channels) - layers = [] - for i in range(1, n): - layers.append( - nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n-1): - if do_bn: - # layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.InstanceNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """ Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one*width, one*height])[None] - center = size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """ Joint encoding of visual appearance and location using MLPs""" - def __init__(self, feature_dim, layers): - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention(query, key, value): - dim = query.shape[1] - scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """ Multi-head attention to increase model expressivitiy """ - def __init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim 
= d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward(self, query, key, value): - batch_dim = query.size(0) - query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value))] - x, prob = attention(query, key, value) - self.prob.append(prob) - return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x, source): - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, feature_dim: int, layer_names: list): - super().__init__() - self.layers = nn.ModuleList([ - AttentionalPropagation(feature_dim, 4) - for _ in range(len(layer_names))]) - self.names = layer_names - - def forward(self, desc0, desc1): - for layer, name in zip(self.layers, self.names): - layer.attn.prob = [] - if name == 'cross': - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations(Z, log_mu, log_nu, iters: int): - """ Perform Sinkhorn Normalization in Log-space for stability""" - u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport(scores, alpha, iters: int): - """ Perform 
Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat([torch.cat([scores, bins0], -1), - torch.cat([bins1, alpha], -1)], 1) - - norm = - (ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - The correspondence ids use -1 to indicate non-matching points. - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. 
https://arxiv.org/abs/1911.11763 - """ - default_config = { - 'descriptor_dim': 256, - 'weights': 'indoor', - 'keypoint_encoder': [32, 64, 128, 256], - 'GNN_layers': ['self', 'cross'] * 9, - 'sinkhorn_iterations': 100, - 'match_threshold': 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config['descriptor_dim'], self.config['keypoint_encoder']) - - self.gnn = AttentionalGNN( - self.config['descriptor_dim'], self.config['GNN_layers']) - - self.final_proj = nn.Conv1d( - self.config['descriptor_dim'], self.config['descriptor_dim'], - kernel_size=1, bias=True) - - bin_score = torch.nn.Parameter(torch.tensor(1.)) - self.register_parameter('bin_score', bin_score) - - # assert self.config['weights'] in ['indoor', 'outdoor'] - # path = Path(__file__).parent - # path = path / 'weights/superglue_{}.pth'.format(self.config['weights']) - # self.load_state_dict(torch.load(path)) - # print('Loaded SuperGlue model (\"{}\" weights)'.format( - # self.config['weights'])) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - """ - desc0 = desc0.transpose(0,1) - desc1 = desc1.transpose(0,1) - kpts0 = torch.reshape(kpts0, (1, -1, 2)) - kpts1 = torch.reshape(kpts1, (1, -1, 2)) - """ - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int)[0], - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int)[0], - 'matching_scores0': kpts0.new_zeros(shape0)[0], - 'matching_scores1': kpts1.new_zeros(shape1)[0], - 'skip_train': True - } - - """ - file_name = data['file_name'] - all_matches = data['all_matches'].permute(1,2,0) # shape=torch.Size([1, 87, 2]) - """ - - # Keypoint normalization. 
- kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - """ - desc0 = desc0 + self.kenc(kpts0, torch.transpose(data['scores0'], 0, 1)) - desc1 = desc1 + self.kenc(kpts1, torch.transpose(data['scores1'], 0, 1)) - """ - desc0 = desc0 + self.kenc(kpts0, data['scores0']) - desc1 = desc1 + self.kenc(kpts1, data['scores1']) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - # Get the matches with score above "match_threshold". - max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - """ - # check if indexed correctly - loss = [] - for i in range(len(all_matches[0])): - x = all_matches[0][i][0] - y = all_matches[0][i][1] - loss.append(-torch.log( scores[0][x][y].exp() )) # check batch size == 1 ? 
- # for p0 in unmatched0: - # loss += -torch.log(scores[0][p0][-1]) - # for p1 in unmatched1: - # loss += -torch.log(scores[0][-1][p1]) - loss_mean = torch.mean(torch.stack(loss)) - loss_mean = torch.reshape(loss_mean, (1, -1)) - """ - - return { - 'matches0': indices0[0], # use -1 for invalid match - 'matches1': indices1[0], # use -1 for invalid match - 'matching_scores0': mscores0[0], - 'matching_scores1': mscores1[0], - # 'loss': loss_mean[0], - 'skip_train': False - } - - # scores big value or small value means confidence? log can't take neg value - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1 - }, - 'superglue': { - 'weights': 'indoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2, - } - } - - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - - print(data['descriptors0'].shape) - superglue = SuperGlue(config.get('superglue', {})) - superglue(data) \ No newline at end of file diff --git a/sjlee_backup/superglue2.py b/sjlee_backup/superglue2.py deleted file mode 100644 index 5bd4028..0000000 --- a/sjlee_backup/superglue2.py +++ /dev/null @@ -1,326 +0,0 @@ - -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. 
-# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -from typing import List, Tuple - -import torch -from torch import nn - - -def MLP(channels: List[int], do_bn: bool = True) -> nn.Module: - """ Multi-layer perceptron """ - n = len(channels) - layers = [] - for i in range(1, n): - layers.append( - nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n-1): - if do_bn: - layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """ Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one*width, one*height])[None] - center = size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """ Joint encoding of visual appearance and location using MLPs""" - def __init__(self, feature_dim: int, layers: List[int]) -> None: - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - dim = query.shape[1] - scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """ Multi-head attention to increase model expressivitiy """ - def 
__init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim = d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor: - batch_dim = query.size(0) - query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value))] - x, _ = attention(query, key, value) - return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x: torch.Tensor, source: torch.Tensor) -> torch.Tensor: - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, feature_dim: int, layer_names: List[str]) -> None: - super().__init__() - self.layers = nn.ModuleList([ - AttentionalPropagation(feature_dim, 4) - for _ in range(len(layer_names))]) - self.names = layer_names - - def forward(self, desc0: torch.Tensor, desc1: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - for layer, name in zip(self.layers, self.names): - if name == 'cross': - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations(Z: torch.Tensor, log_mu: torch.Tensor, log_nu: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Sinkhorn Normalization in Log-space for stability""" - u, v = 
torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat([torch.cat([scores, bins0], -1), - torch.cat([bins1, alpha], -1)], 1) - - norm = - (ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - The correspondence ids use -1 to indicate non-matching points. - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. 
https://arxiv.org/abs/1911.11763 - """ - default_config = { - 'descriptor_dim': 256, - 'weights': 'indoor', - 'keypoint_encoder': [32, 64, 128, 256], - 'GNN_layers': ['self', 'cross'] * 9, - 'sinkhorn_iterations': 100, - 'match_threshold': 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config['descriptor_dim'], self.config['keypoint_encoder']) - - self.gnn = AttentionalGNN( - feature_dim=self.config['descriptor_dim'], layer_names=self.config['GNN_layers']) - - self.final_proj = nn.Conv1d( - self.config['descriptor_dim'], self.config['descriptor_dim'], - kernel_size=1, bias=True) - - bin_score = torch.nn.Parameter(torch.tensor(1.)) - self.register_parameter('bin_score', bin_score) - - assert self.config['weights'] in ['indoor', 'outdoor'] - path = Path(__file__).parent - path = path / 'weights/superglue_{}.pth'.format(self.config['weights']) - self.load_state_dict(torch.load(str(path))) - print('Loaded SuperGlue model (\"{}\" weights)'.format( - self.config['weights'])) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int), - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int), - 'matching_scores0': kpts0.new_zeros(shape0), - 'matching_scores1': kpts1.new_zeros(shape1), - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, data['scores0']) - desc1 = desc1 + self.kenc(kpts1, data['scores1']) - - # Multi-layer Transformer network. 
- desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - print(scores.shape) - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - # Get the matches with score above "match_threshold". - max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - print(scores.shape) - return { - 'matches0': indices0, # use -1 for invalid match - 'matches1': indices1, # use -1 for invalid match - 'matching_scores0': mscores0, - 'matching_scores1': mscores1, - } - -if __name__ == '__main__': - from superpoint import SuperPoint - - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1 - }, - 'superglue': { - 'weights': 'indoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2, - } - } - - data = { - 'image0': torch.randn(1, 1, 512, 512), - 'image1': torch.randn(1, 1, 512, 512) - } - - superpoint = SuperPoint(config.get('superpoint', {})) - - output1 = superpoint({'image': data['image0']}) - output2 = superpoint({'image': data['image1']}) - - pred = {} - - pred = {**pred, **{k+'0': v for k, v in 
output1.items()}} - pred = {**pred, **{k+'1': v for k, v in output2.items()}} - - data = {**data, **pred} - - for k in data: - if isinstance(data[k], (list, tuple)): - data[k] = torch.stack(data[k]) - - superglue = SuperGlue(config.get('superglue', {})) - output = superglue(data) diff --git a/sjlee_backup/superpoint.py b/sjlee_backup/superpoint.py deleted file mode 100644 index 14a07fd..0000000 --- a/sjlee_backup/superpoint.py +++ /dev/null @@ -1,222 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. 
THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. -# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from pathlib import Path -import torch -from torch import nn - -def simple_nms(scores, nms_radius: int): - """ Fast Non-maximum suppression to remove nearby points """ - assert(nms_radius >= 0) - - def max_pool(x): - return torch.nn.functional.max_pool2d( - x, kernel_size=nms_radius*2+1, stride=1, padding=nms_radius) - - zeros = torch.zeros_like(scores) - max_mask = scores == max_pool(scores) - for _ in range(2): - supp_mask = max_pool(max_mask.float()) > 0 - supp_scores = torch.where(supp_mask, zeros, scores) - new_max_mask = supp_scores == max_pool(supp_scores) - max_mask = max_mask | (new_max_mask & (~supp_mask)) - return torch.where(max_mask, scores, zeros) - - -def remove_borders(keypoints, scores, border: int, height: int, width: int): - """ Removes keypoints too close to the border """ - mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border)) - mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border)) - mask = mask_h & mask_w - return keypoints[mask], scores[mask] - - -def top_k_keypoints(keypoints, scores, k: int): - if k >= len(keypoints): - return keypoints, scores - scores, indices = torch.topk(scores, k, dim=0) - return keypoints[indices], scores - - -def sample_descriptors(keypoints, descriptors, s: int = 8): - """ Interpolate descriptors at keypoint locations """ - b, c, h, w = descriptors.shape - keypoints = keypoints - s / 2 + 0.5 - keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 
0.5)], - ).to(keypoints)[None] - keypoints = keypoints*2 - 1 # normalize to (-1, 1) - args = {'align_corners': True} if int(torch.__version__[2]) > 2 else {} - descriptors = torch.nn.functional.grid_sample( - descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', **args) - descriptors = torch.nn.functional.normalize( - descriptors.reshape(b, c, -1), p=2, dim=1) - return descriptors - - -class SuperPoint(nn.Module): - """SuperPoint Convolutional Detector and Descriptor - SuperPoint: Self-Supervised Interest Point Detection and - Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629 - """ - default_config = { - 'descriptor_dim': 256, - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1, - 'remove_borders': 4, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.relu = nn.ReLU(inplace=True) - self.pool = nn.MaxPool2d(kernel_size=2, stride=2) - c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256 - - self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1) - self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) - self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) - self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) - self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) - self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) - self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) - self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) - - self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) - self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) - - self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) - self.convDb = nn.Conv2d( - c5, self.config['descriptor_dim'], - kernel_size=1, stride=1, padding=0) - - path = Path(__file__).parent / 'weights/superpoint_v1.pth' - 
self.load_state_dict(torch.load(str(path))) - - mk = self.config['max_keypoints'] - if mk == 0 or mk < -1: - raise ValueError('\"max_keypoints\" must be positive or \"-1\"') - - print('Loaded SuperPoint model') - - def forward(self, data): - """ Compute keypoints, scores, descriptors for image """ - # Shared Encoder - x = self.relu(self.conv1a(data['image'])) - x = self.relu(self.conv1b(x)) - x = self.pool(x) - x = self.relu(self.conv2a(x)) - x = self.relu(self.conv2b(x)) - x = self.pool(x) - x = self.relu(self.conv3a(x)) - x = self.relu(self.conv3b(x)) - x = self.pool(x) - x = self.relu(self.conv4a(x)) - x = self.relu(self.conv4b(x)) - - # Compute the dense keypoint scores - cPa = self.relu(self.convPa(x)) - scores = self.convPb(cPa) - scores = torch.nn.functional.softmax(scores, 1)[:, :-1] - b, _, h, w = scores.shape - scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8) - scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8) - scores = simple_nms(scores, self.config['nms_radius']) - - # Extract keypoints - keypoints = [ - torch.nonzero(s > self.config['keypoint_threshold']) - for s in scores] - scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)] - - # Discard keypoints near the image borders - keypoints, scores = list(zip(*[ - remove_borders(k, s, self.config['remove_borders'], h*8, w*8) - for k, s in zip(keypoints, scores)])) - - # Keep the k keypoints with highest score - if self.config['max_keypoints'] >= 0: - keypoints, scores = list(zip(*[ - top_k_keypoints(k, s, self.config['max_keypoints']) - for k, s in zip(keypoints, scores)])) - - # Convert (h, w) to (x, y) - keypoints = [torch.flip(k, [1]).float() for k in keypoints] - - # Compute the dense descriptors - cDa = self.relu(self.convDa(x)) - descriptors = self.convDb(cDa) - descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1) - - # Extract descriptors - descriptors = [sample_descriptors(k[None], d[None], 8)[0] - for k, d in zip(keypoints, descriptors)] - - return { - 
'keypoints': keypoints, - 'scores': scores, - 'descriptors': descriptors, - } - -if __name__ == '__main__': - config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': -1 - }, - 'superglue': { - 'weights': 'indoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2, - } - } - - test_img = torch.randn(1, 1, 512, 512) - data = {'image': test_img} - - superpoint = SuperPoint(config.get('superpoint', {})) - output = superpoint(data) - - print(output['keypoints'][0].shape, output['descriptors'][0].shape) \ No newline at end of file diff --git a/sjlee_backup/train_pseudo.py b/sjlee_backup/train_pseudo.py deleted file mode 100644 index 3c09bf4..0000000 --- a/sjlee_backup/train_pseudo.py +++ /dev/null @@ -1,41 +0,0 @@ - -""" -1. config 아래와 같이 설정 -2. weights은 상황에 맞게 indoor, outdoor 설정해주어야 함 -config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': 1024 - }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold':0.2 - } - } -""" - -""" -# start training -for epoch in range(1, opt.epoch+1): - epoch_loss = 0 - superglue.double().train() - for i, pred in enumerate(train_loader): - for k in pred: - if k != 'file_name' and k!='image0' and k!='image1': - if type(pred[k]) == torch.Tensor: - pred[k] = Variable(pred[k].cuda()) - else: - pred[k] = Variable(torch.stack(pred[k]).cuda()) - - # =========== new code =============== # - scores, data = superglue(pred) - loss = loss_superglue(scores, data['all_matches'].permute(1, 2, 0)) - - for k, v in pred.items(): - pred[k] = v[0] - pred = {**pred, **data, **{'loss', loss}} - - # ... 
keep going -""" \ No newline at end of file diff --git a/test.py b/test.py deleted file mode 100644 index b8af1e0..0000000 --- a/test.py +++ /dev/null @@ -1,322 +0,0 @@ -from pathlib import Path -import argparse -import random -import numpy as np -import matplotlib.cm as cm -import torch -import torch.nn as nn -from torch.autograd import Variable -import os -import torch.multiprocessing -from tqdm import tqdm - -import cv2 -from scipy.spatial.distance import cdist - -from models.utils import (compute_pose_error, compute_epipolar_error, - estimate_pose, make_matching_plot, - error_colormap, AverageTimer, pose_auc, read_image, - rotate_intrinsics, rotate_pose_inplane, - scale_intrinsics, read_image_modified, frame2tensor) - -from models.matchingsuperglue import Matching -from sjlee_backup.losssuperglue import loss_superglue - -torch.set_grad_enabled(True) -torch.multiprocessing.set_sharing_strategy('file_system') - -parser = argparse.ArgumentParser( - description='Image pair matching and pose evaluation with SuperGlue', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - -parser.add_argument( - '--viz', action='store_true', - help='Visualize the matches and dump the plots') -parser.add_argument( - '--eval', action='store_true', - help='Perform the evaluation' - ' (requires ground truth pose and intrinsics)') - -parser.add_argument( - '--superglue', choices={'indoor', 'outdoor'}, default='indoor', - help='SuperGlue weights') -parser.add_argument( - '--max_keypoints', type=int, default=1024, - help='Maximum number of keypoints detected by Superpoint' - ' (\'-1\' keeps all keypoints)') -parser.add_argument( - '--keypoint_threshold', type=float, default=0.005, - help='SuperPoint keypoint detector confidence threshold') -parser.add_argument( - '--nms_radius', type=int, default=4, - help='SuperPoint Non Maximum Suppression (NMS) radius' - ' (Must be positive)') -parser.add_argument( - '--sinkhorn_iterations', type=int, default=20, - help='Number of Sinkhorn 
iterations performed by SuperGlue') -parser.add_argument( - '--match_threshold', type=float, default=0.2, - help='SuperGlue match threshold') - -parser.add_argument( - '--resize', type=int, nargs='+', default=[640, 480], - help='Resize the input image before running inference. If two numbers, ' - 'resize to the exact dimensions, if one number, resize the max ' - 'dimension, if -1, do not resize') -parser.add_argument( - '--resize_float', action='store_true', - help='Resize the image after casting uint8 to float') - -parser.add_argument( - '--cache', action='store_true', - help='Skip the pair if output .npz files are already found') -parser.add_argument( - '--show_keypoints', action='store_true', - help='Plot the keypoints in addition to the matches') -parser.add_argument( - '--fast_viz', action='store_true', - help='Use faster image visualization based on OpenCV instead of Matplotlib') -parser.add_argument( - '--viz_extension', type=str, default='png', choices=['png', 'pdf'], - help='Visualization file extension. 
Use pdf for highest-quality.') - -parser.add_argument( - '--opencv_display', action='store_true', - help='Visualize via OpenCV before saving output images') -parser.add_argument( - '--eval_pairs_list', type=str, default='assets/scannet_sample_pairs_with_gt.txt', - help='Path to the list of image pairs for evaluation') -parser.add_argument( - '--shuffle', action='store_true', - help='Shuffle ordering of pairs before processing') -parser.add_argument( - '--max_length', type=int, default=-1, - help='Maximum number of pairs to evaluate') - -parser.add_argument( - '--eval_input_dir', type=str, default='assets/scannet_sample_images/', - help='Path to the directory that contains the images') -parser.add_argument( - '--eval_output_dir', type=str, default='dump_match_pairs/', - help='Path to the directory in which the .npz results and optional,' - 'visualizations are written') -parser.add_argument( - '--learning_rate', type=int, default=0.0001, - help='Learning rate') - -parser.add_argument( - '--batch_size', type=int, default=1, - help='batch_size') -parser.add_argument( - '--train_path', type=str, default='/home/cvlab09/projects/seungjun_an/dataset/train2014/', - help='Path to the directory of training imgs.') -parser.add_argument( - '--epoch', type=int, default=2, - help='Number of epoches') - - - -if __name__ == '__main__': - opt = parser.parse_args() - print(opt) - - # make sure the flags are properly used - assert not (opt.opencv_display and not opt.viz), 'Must use --viz with --opencv_display' - assert not (opt.opencv_display and not opt.fast_viz), 'Cannot use --opencv_display without --fast_viz' - assert not (opt.fast_viz and not opt.viz), 'Must use --viz with --fast_viz' - assert not (opt.fast_viz and opt.viz_extension == 'pdf'), 'Cannot use pdf extension with --fast_viz' - - numOftrainSet = 100 - - # store viz results - eval_output_dir = Path(opt.eval_output_dir) - eval_output_dir.mkdir(exist_ok=True, parents=True) - print('Will write visualization images to', - 
'directory \"{}\"'.format(eval_output_dir)) - config = { - 'superpoint': { - 'nms_radius': opt.nms_radius, - 'keypoint_threshold': opt.keypoint_threshold, - 'max_keypoints': opt.max_keypoints - }, - 'superglue': { - 'weights': opt.superglue, - 'sinkhorn_iterations': opt.sinkhorn_iterations, - 'match_threshold': opt.match_threshold, - } - } - ''' - # load training data - train_set = SparseDataset(opt.train_path, opt.max_keypoints, config) - train_loader = torch.utils.data.DataLoader(dataset=train_set, shuffle=False, batch_size=opt.batch_size, drop_last=True) - ''' - - matching = Matching(config).eval().to('cuda') - sum_loss = 0. - - #optimizer = torch.optim.Adam(superglue.parameters(), lr=opt.learning_rate) - - device = 'cuda' - - for epoch in range(1, opt.epoch+1): - epoch_loss = 0 - - ##superglue.double().train() ########################################## - for i in range(numOftrainSet): - file_name ='/home/cvlab09/projects/seungjun_an/dataset/train2014/'+str(i+1)+'.jpg' - image0, inp0, scales0 = read_image( - file_name, opt.resize, 0, opt.resize_float) - - - - width, height = image0.shape[:2] - - corners = np.array([[0, 0], [0, height], [width, 0], [width, height]], dtype=np.float32) - - warp = np.random.randint(-224, 224, size=(4, 2)).astype(np.float32) - - - - - # get the corresponding warped image - M = cv2.getPerspectiveTransform(corners, corners + warp) - warped = cv2.warpPerspective(src=image0, M=M, dsize=(image0.shape[1], image0.shape[0])) - - inp1 = frame2tensor(warped) - - print(i) - - scores, data, pred = matching({'image0': inp0, 'image1': inp1}) - - - ################################################################################################# - - if data['skip_train'] : continue - - - key1, key2, des1, des2 = pred['keypoints0'], pred['keypoints1'], pred['descriptors0'], pred['descriptors0'] - #all match 만들자 - ################################################################### - - kp1 = key1.squeeze() - kp2 = key2.squeeze() - kp1_np = 
np.array(key1.cpu()).squeeze() - kp2_np = np.array(key2.cpu()).squeeze() - descs1 = des1.cpu().detach().numpy().squeeze().transpose(0, 1) - descs2 = des2.cpu().detach().numpy().squeeze().transpose(0, 1) - - - - # obtain the matching matrix of the image pair - kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :] - dists = cdist(kp1_projected, kp2_np) - - min1 = np.argmin(dists, axis=0) - min2 = np.argmin(dists, axis=1) - - min1v = np.min(dists, axis=1) - min1f = min2[min1v < 3] - - xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0] - matches = np.intersect1d(min1f, xx) - - missing1 = np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches]) - missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches) - - MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]]) - MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)]) - MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]]) - all_matches = np.concatenate([MN, MN2, MN3], axis=1) - all_matches = torch.tensor(all_matches).unsqueeze(1) - - - ####################################################################### - - if data['skip_train'] == True: # image has no keypoint - continue - - Loss = loss_superglue(scores, all_matches.permute(1, 2, 0)) - print(Loss) - - epoch_loss += Loss.item() - - sum_loss += Loss.item() - - #Loss.requres_grad = True - - #matching.zero_grad() - #Loss.backward() - #optimizer.step() - - # for every 50 images, print progress and visualize the matches - test = 10 - if (i+1) % test == 0: ############################50 - print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' - .format(epoch, opt.epoch, i+1, numOftrainSet, sum_loss / float(test))) - sum_loss = 0. - - ### eval ### - # Visualize the matches. 
- print('model->eval') ############################################################################ - #matching.eval() - image0, image1 = pred['image0'].cpu().numpy()[0]*255., pred['image1'].cpu().numpy()[0]*255. - - image0, image1 = image0[0], image1[0] - - kpts0, kpts1 = pred['keypoints0'].cpu().numpy()[0], pred['keypoints1'].cpu().numpy()[0] - matches, conf = data['matches0'].cpu().detach().numpy(), data['matching_scores0'].cpu().detach().numpy() - - kpts0, kpts1 = kpts0[0], kpts1[0] - - image0 = read_image_modified(image0, opt.resize, opt.resize_float) - image1 = read_image_modified(image1, opt.resize, opt.resize_float) - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - viz_path = eval_output_dir / '{}_matches.{}'.format(str(i), opt.viz_extension) - color = cm.jet(mconf) - stem = file_name - text = [] - - make_matching_plot( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, - text, viz_path, stem, stem, opt.show_keypoints, - opt.fast_viz, opt.opencv_display, 'Matches') - - print('superglue->train')################################ - #matching.float().train()################################# - # process checkpoint for every 5e3 images - if (i+1) % 2e3 == 0: - model_out_path = "model_epoch_{}.pth".format(epoch) - torch.save(matching, model_out_path) - print ('Epoch [{}/{}], Step [{}/{}], Checkpoint saved to {}' - .format(epoch, opt.epoch, i+1, numOftrainSet, model_out_path)) - - # save checkpoint when an epoch finishes - epoch_loss /= numOftrainSet - model_out_path = "model_epoch_{}.pth".format(epoch) - torch.save(matching, model_out_path) - print("Epoch [{}/{}] done. Epoch Loss {}. 
Checkpoint saved to {}" - .format(epoch, opt.epoch, epoch_loss, model_out_path)) - - - ''' - print(pred.keys()) - - print(pred['keypoints0'][0].size()) - print(pred['keypoints1'][0].size()) - print(pred['matches0'].size()) - - print(pred['matches1'].size()) - - - print(pred['matches0'].size()) - print(pred['matches1'].size()) - print(pred['matching_scores0'].size()) - print(pred['matching_scores1'].size()) - - print(pred['matches0']) - - print(pred['matching_scores0']) - ''' \ No newline at end of file diff --git a/test_matches/0_notraincatmatches.png b/test_matches/0_notraincatmatches.png deleted file mode 100644 index aae04af..0000000 Binary files a/test_matches/0_notraincatmatches.png and /dev/null differ diff --git a/test_matches/0_originmatches.png b/test_matches/0_originmatches.png deleted file mode 100644 index 76dc037..0000000 Binary files a/test_matches/0_originmatches.png and /dev/null differ diff --git a/test_matches/0_trainedcatmatches.png b/test_matches/0_trainedcatmatches.png deleted file mode 100644 index 9b3962d..0000000 Binary files a/test_matches/0_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/10_notraincatmatches.png b/test_matches/10_notraincatmatches.png deleted file mode 100644 index 8199bc7..0000000 Binary files a/test_matches/10_notraincatmatches.png and /dev/null differ diff --git a/test_matches/10_originmatches.png b/test_matches/10_originmatches.png deleted file mode 100644 index 6224bcd..0000000 Binary files a/test_matches/10_originmatches.png and /dev/null differ diff --git a/test_matches/10_trainedcatmatches.png b/test_matches/10_trainedcatmatches.png deleted file mode 100644 index caef98b..0000000 Binary files a/test_matches/10_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/11_notraincatmatches.png b/test_matches/11_notraincatmatches.png deleted file mode 100644 index 1331848..0000000 Binary files a/test_matches/11_notraincatmatches.png and /dev/null differ diff --git 
a/test_matches/11_originmatches.png b/test_matches/11_originmatches.png deleted file mode 100644 index 5ede60b..0000000 Binary files a/test_matches/11_originmatches.png and /dev/null differ diff --git a/test_matches/11_trainedcatmatches.png b/test_matches/11_trainedcatmatches.png deleted file mode 100644 index 520092b..0000000 Binary files a/test_matches/11_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/12_notraincatmatches.png b/test_matches/12_notraincatmatches.png deleted file mode 100644 index b808d64..0000000 Binary files a/test_matches/12_notraincatmatches.png and /dev/null differ diff --git a/test_matches/12_originmatches.png b/test_matches/12_originmatches.png deleted file mode 100644 index f057704..0000000 Binary files a/test_matches/12_originmatches.png and /dev/null differ diff --git a/test_matches/12_trainedcatmatches.png b/test_matches/12_trainedcatmatches.png deleted file mode 100644 index a515d90..0000000 Binary files a/test_matches/12_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/13_notraincatmatches.png b/test_matches/13_notraincatmatches.png deleted file mode 100644 index d3ecf0e..0000000 Binary files a/test_matches/13_notraincatmatches.png and /dev/null differ diff --git a/test_matches/13_originmatches.png b/test_matches/13_originmatches.png deleted file mode 100644 index aab5b29..0000000 Binary files a/test_matches/13_originmatches.png and /dev/null differ diff --git a/test_matches/13_trainedcatmatches.png b/test_matches/13_trainedcatmatches.png deleted file mode 100644 index f1c15db..0000000 Binary files a/test_matches/13_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/14_notraincatmatches.png b/test_matches/14_notraincatmatches.png deleted file mode 100644 index 62c3ff9..0000000 Binary files a/test_matches/14_notraincatmatches.png and /dev/null differ diff --git a/test_matches/14_originmatches.png b/test_matches/14_originmatches.png deleted file mode 100644 index 
9c7fd8b..0000000 Binary files a/test_matches/14_originmatches.png and /dev/null differ diff --git a/test_matches/14_trainedcatmatches.png b/test_matches/14_trainedcatmatches.png deleted file mode 100644 index e549ae8..0000000 Binary files a/test_matches/14_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/15_notraincatmatches.png b/test_matches/15_notraincatmatches.png deleted file mode 100644 index dcc1e71..0000000 Binary files a/test_matches/15_notraincatmatches.png and /dev/null differ diff --git a/test_matches/15_originmatches.png b/test_matches/15_originmatches.png deleted file mode 100644 index 51ced44..0000000 Binary files a/test_matches/15_originmatches.png and /dev/null differ diff --git a/test_matches/15_trainedcatmatches.png b/test_matches/15_trainedcatmatches.png deleted file mode 100644 index 4f6f953..0000000 Binary files a/test_matches/15_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/16_notraincatmatches.png b/test_matches/16_notraincatmatches.png deleted file mode 100644 index cccc029..0000000 Binary files a/test_matches/16_notraincatmatches.png and /dev/null differ diff --git a/test_matches/16_originmatches.png b/test_matches/16_originmatches.png deleted file mode 100644 index f75564c..0000000 Binary files a/test_matches/16_originmatches.png and /dev/null differ diff --git a/test_matches/16_trainedcatmatches.png b/test_matches/16_trainedcatmatches.png deleted file mode 100644 index 0867d3f..0000000 Binary files a/test_matches/16_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/17_notraincatmatches.png b/test_matches/17_notraincatmatches.png deleted file mode 100644 index be09ded..0000000 Binary files a/test_matches/17_notraincatmatches.png and /dev/null differ diff --git a/test_matches/17_originmatches.png b/test_matches/17_originmatches.png deleted file mode 100644 index 845e3d5..0000000 Binary files a/test_matches/17_originmatches.png and /dev/null differ diff --git 
a/test_matches/17_trainedcatmatches.png b/test_matches/17_trainedcatmatches.png deleted file mode 100644 index bbea6dc..0000000 Binary files a/test_matches/17_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/18_notraincatmatches.png b/test_matches/18_notraincatmatches.png deleted file mode 100644 index e442063..0000000 Binary files a/test_matches/18_notraincatmatches.png and /dev/null differ diff --git a/test_matches/18_originmatches.png b/test_matches/18_originmatches.png deleted file mode 100644 index e13f465..0000000 Binary files a/test_matches/18_originmatches.png and /dev/null differ diff --git a/test_matches/18_trainedcatmatches.png b/test_matches/18_trainedcatmatches.png deleted file mode 100644 index de20e8f..0000000 Binary files a/test_matches/18_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/19_notraincatmatches.png b/test_matches/19_notraincatmatches.png deleted file mode 100644 index 99ca0a6..0000000 Binary files a/test_matches/19_notraincatmatches.png and /dev/null differ diff --git a/test_matches/19_originmatches.png b/test_matches/19_originmatches.png deleted file mode 100644 index 04f6251..0000000 Binary files a/test_matches/19_originmatches.png and /dev/null differ diff --git a/test_matches/19_trainedcatmatches.png b/test_matches/19_trainedcatmatches.png deleted file mode 100644 index ebef81c..0000000 Binary files a/test_matches/19_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/1_notraincatmatches.png b/test_matches/1_notraincatmatches.png deleted file mode 100644 index a2e89ce..0000000 Binary files a/test_matches/1_notraincatmatches.png and /dev/null differ diff --git a/test_matches/1_originmatches.png b/test_matches/1_originmatches.png deleted file mode 100644 index d57404c..0000000 Binary files a/test_matches/1_originmatches.png and /dev/null differ diff --git a/test_matches/1_trainedcatmatches.png b/test_matches/1_trainedcatmatches.png deleted file mode 100644 index 
69878f0..0000000 Binary files a/test_matches/1_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/2_notraincatmatches.png b/test_matches/2_notraincatmatches.png deleted file mode 100644 index 93a9280..0000000 Binary files a/test_matches/2_notraincatmatches.png and /dev/null differ diff --git a/test_matches/2_originmatches.png b/test_matches/2_originmatches.png deleted file mode 100644 index 08c93f7..0000000 Binary files a/test_matches/2_originmatches.png and /dev/null differ diff --git a/test_matches/2_trainedcatmatches.png b/test_matches/2_trainedcatmatches.png deleted file mode 100644 index e9aea8b..0000000 Binary files a/test_matches/2_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/3_notraincatmatches.png b/test_matches/3_notraincatmatches.png deleted file mode 100644 index 8b95044..0000000 Binary files a/test_matches/3_notraincatmatches.png and /dev/null differ diff --git a/test_matches/3_originmatches.png b/test_matches/3_originmatches.png deleted file mode 100644 index c0dca81..0000000 Binary files a/test_matches/3_originmatches.png and /dev/null differ diff --git a/test_matches/3_trainedcatmatches.png b/test_matches/3_trainedcatmatches.png deleted file mode 100644 index fbd6b3c..0000000 Binary files a/test_matches/3_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/4_notraincatmatches.png b/test_matches/4_notraincatmatches.png deleted file mode 100644 index 8f82758..0000000 Binary files a/test_matches/4_notraincatmatches.png and /dev/null differ diff --git a/test_matches/4_originmatches.png b/test_matches/4_originmatches.png deleted file mode 100644 index 6287ce8..0000000 Binary files a/test_matches/4_originmatches.png and /dev/null differ diff --git a/test_matches/4_trainedcatmatches.png b/test_matches/4_trainedcatmatches.png deleted file mode 100644 index 673a2b1..0000000 Binary files a/test_matches/4_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/5_notraincatmatches.png 
b/test_matches/5_notraincatmatches.png deleted file mode 100644 index be56c59..0000000 Binary files a/test_matches/5_notraincatmatches.png and /dev/null differ diff --git a/test_matches/5_originmatches.png b/test_matches/5_originmatches.png deleted file mode 100644 index b8c458e..0000000 Binary files a/test_matches/5_originmatches.png and /dev/null differ diff --git a/test_matches/5_trainedcatmatches.png b/test_matches/5_trainedcatmatches.png deleted file mode 100644 index be7e8d5..0000000 Binary files a/test_matches/5_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/6_notraincatmatches.png b/test_matches/6_notraincatmatches.png deleted file mode 100644 index fbb2ff8..0000000 Binary files a/test_matches/6_notraincatmatches.png and /dev/null differ diff --git a/test_matches/6_originmatches.png b/test_matches/6_originmatches.png deleted file mode 100644 index 2a4bd87..0000000 Binary files a/test_matches/6_originmatches.png and /dev/null differ diff --git a/test_matches/6_trainedcatmatches.png b/test_matches/6_trainedcatmatches.png deleted file mode 100644 index 5569c68..0000000 Binary files a/test_matches/6_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/7_notraincatmatches.png b/test_matches/7_notraincatmatches.png deleted file mode 100644 index 2ea89c3..0000000 Binary files a/test_matches/7_notraincatmatches.png and /dev/null differ diff --git a/test_matches/7_originmatches.png b/test_matches/7_originmatches.png deleted file mode 100644 index 5b7b6b7..0000000 Binary files a/test_matches/7_originmatches.png and /dev/null differ diff --git a/test_matches/7_trainedcatmatches.png b/test_matches/7_trainedcatmatches.png deleted file mode 100644 index 9fa5f54..0000000 Binary files a/test_matches/7_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/8_notraincatmatches.png b/test_matches/8_notraincatmatches.png deleted file mode 100644 index 7018044..0000000 Binary files a/test_matches/8_notraincatmatches.png and 
/dev/null differ diff --git a/test_matches/8_originmatches.png b/test_matches/8_originmatches.png deleted file mode 100644 index 57374e9..0000000 Binary files a/test_matches/8_originmatches.png and /dev/null differ diff --git a/test_matches/8_trainedcatmatches.png b/test_matches/8_trainedcatmatches.png deleted file mode 100644 index ecbbe99..0000000 Binary files a/test_matches/8_trainedcatmatches.png and /dev/null differ diff --git a/test_matches/9_notraincatmatches.png b/test_matches/9_notraincatmatches.png deleted file mode 100644 index f6ac35a..0000000 Binary files a/test_matches/9_notraincatmatches.png and /dev/null differ diff --git a/test_matches/9_originmatches.png b/test_matches/9_originmatches.png deleted file mode 100644 index 4e441f5..0000000 Binary files a/test_matches/9_originmatches.png and /dev/null differ diff --git a/test_matches/9_trainedcatmatches.png b/test_matches/9_trainedcatmatches.png deleted file mode 100644 index 8bae7bd..0000000 Binary files a/test_matches/9_trainedcatmatches.png and /dev/null differ diff --git a/train.py b/train.py deleted file mode 100644 index a32fa37..0000000 --- a/train.py +++ /dev/null @@ -1,331 +0,0 @@ -from pathlib import Path -import argparse -import random -import numpy as np -import matplotlib.cm as cm -import torch -import torch.nn as nn -from torch.autograd import Variable -import os -import torch.multiprocessing -from tqdm import tqdm - -import cv2 -from scipy.spatial.distance import cdist - -from models.utils import (compute_pose_error, compute_epipolar_error, - estimate_pose, make_matching_plot, - error_colormap, AverageTimer, pose_auc, read_image, - rotate_intrinsics, rotate_pose_inplane, - scale_intrinsics, read_image_modified, frame2tensor) - -from models.matching import Matching -from sjlee.loss import loss_superglue - -torch.set_grad_enabled(True) -torch.multiprocessing.set_sharing_strategy('file_system') - -parser = argparse.ArgumentParser( - description='Image pair matching and pose evaluation with 
SuperGlue', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - -parser.add_argument( - '--viz', action='store_true', - help='Visualize the matches and dump the plots') -parser.add_argument( - '--eval', action='store_true', - help='Perform the evaluation' - ' (requires ground truth pose and intrinsics)') - -parser.add_argument( - '--superglue', choices={'indoor', 'outdoor'}, default='indoor', - help='SuperGlue weights') -parser.add_argument( - '--max_keypoints', type=int, default=1023, - help='Maximum number of keypoints detected by Superpoint' - ' (\'-1\' keeps all keypoints)') -parser.add_argument( - '--keypoint_threshold', type=float, default=0.005, - help='SuperPoint keypoint detector confidence threshold') -parser.add_argument( - '--nms_radius', type=int, default=4, - help='SuperPoint Non Maximum Suppression (NMS) radius' - ' (Must be positive)') -parser.add_argument( - '--sinkhorn_iterations', type=int, default=20, - help='Number of Sinkhorn iterations performed by SuperGlue') -parser.add_argument( - '--match_threshold', type=float, default=0.2, - help='SuperGlue match threshold') - -parser.add_argument( - '--resize', type=int, nargs='+', default=[640, 480], - help='Resize the input image before running inference. If two numbers, ' - 'resize to the exact dimensions, if one number, resize the max ' - 'dimension, if -1, do not resize') -parser.add_argument( - '--resize_float', action='store_true', - help='Resize the image after casting uint8 to float') - -parser.add_argument( - '--cache', action='store_true', - help='Skip the pair if output .npz files are already found') -parser.add_argument( - '--show_keypoints', action='store_true', - help='Plot the keypoints in addition to the matches') -parser.add_argument( - '--fast_viz', action='store_true', - help='Use faster image visualization based on OpenCV instead of Matplotlib') -parser.add_argument( - '--viz_extension', type=str, default='png', choices=['png', 'pdf'], - help='Visualization file extension. 
Use pdf for highest-quality.') - -parser.add_argument( - '--opencv_display', action='store_true', - help='Visualize via OpenCV before saving output images') -parser.add_argument( - '--eval_pairs_list', type=str, default='assets/scannet_sample_pairs_with_gt.txt', - help='Path to the list of image pairs for evaluation') -parser.add_argument( - '--shuffle', action='store_true', - help='Shuffle ordering of pairs before processing') -parser.add_argument( - '--max_length', type=int, default=-1, - help='Maximum number of pairs to evaluate') - -parser.add_argument( - '--eval_input_dir', type=str, default='assets/scannet_sample_images/', - help='Path to the directory that contains the images') -parser.add_argument( - '--eval_output_dir', type=str, default='dump_match_pairs/', - help='Path to the directory in which the .npz results and optional,' - 'visualizations are written') -parser.add_argument( - '--learning_rate', type=float, default=0.0001, #0.0001 - help='Learning rate') - -parser.add_argument( - '--batch_size', type=int, default=1, - help='batch_size') -parser.add_argument( - '--train_path', type=str, default='/home/cvlab09/projects/seungjun_an/dataset/indoor2/', - help='Path to the directory of training imgs.') -parser.add_argument( - '--epoch', type=int, default=5, - help='Number of epoches') - - - -if __name__ == '__main__': - opt = parser.parse_args() - print(opt) - torch.manual_seed(225) - - # make sure the flags are properly used - assert not (opt.opencv_display and not opt.viz), 'Must use --viz with --opencv_display' - assert not (opt.opencv_display and not opt.fast_viz), 'Cannot use --opencv_display without --fast_viz' - assert not (opt.fast_viz and not opt.viz), 'Must use --viz with --fast_viz' - assert not (opt.fast_viz and opt.viz_extension == 'pdf'), 'Cannot use pdf extension with --fast_viz' - - numOftrainSet = 13100 - - # store viz results - eval_output_dir = Path(opt.eval_output_dir) - eval_output_dir.mkdir(exist_ok=True, parents=True) - print('Will 
write visualization images to', - 'directory \"{}\"'.format(eval_output_dir)) - config = { - 'superpoint': { - 'nms_radius': opt.nms_radius, - 'keypoint_threshold': opt.keypoint_threshold, - 'max_keypoints': opt.max_keypoints - }, - 'superglue': { - 'weights': opt.superglue, - 'sinkhorn_iterations': opt.sinkhorn_iterations, - 'match_threshold': opt.match_threshold, - } - } - matching = Matching(config).eval().to('cuda') - sum_loss = 0. - - optimizer = torch.optim.Adam(matching.parameters(), lr=opt.learning_rate) - - device = 'cuda' - - for epoch in range(1, opt.epoch+1): - epoch_loss = 0 - - #matching.float().train() ########################################## - for i in range(numOftrainSet): - file_name =opt.train_path+str(i+1)+'.jpg' - image0, inp0, scales0 = read_image( - file_name, opt.resize, 0, opt.resize_float) - - if str(type(image0)) != '' : continue - - width, height = image0.shape[:2] - - corners = np.array([[0, 0], [0, height], [width, 0], [width, height]], dtype=np.float32) - - warp = np.random.randint(-224, 224, size=(4, 2)).astype(np.float32) - - - - - # get the corresponding warped image - M = cv2.getPerspectiveTransform(corners, corners + warp) - warped = cv2.warpPerspective(src=image0, M=M, dsize=(image0.shape[1], image0.shape[0])) - - inp1 = frame2tensor(warped) - - - #print(i) - - scores, data, pred = matching({'image0': inp0, 'image1': inp1}) - - - ################################################################################################# - - if data['skip_train'] : continue - - - key1, key2, des1, des2 = pred['keypoints0'], pred['keypoints1'], pred['descriptors0'], pred['descriptors0'] - #all match 만들자 - ################################################################### - - kp1 = key1.squeeze() - kp2 = key2.squeeze() - kp1_np = np.array(key1.cpu()).squeeze() - kp2_np = np.array(key2.cpu()).squeeze() - descs1 = des1.cpu().detach().numpy().squeeze().transpose(0, 1) - descs2 = des2.cpu().detach().numpy().squeeze().transpose(0, 1) - - - - # 
obtain the matching matrix of the image pair - kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :] - - if str(type(kp1_projected[0])) != '' or str(type(kp2_np[0])) !='' : - print(str(type(kp1_projected[0]))) - continue - - dists = cdist(kp1_projected, kp2_np) - - min1 = np.argmin(dists, axis=0) - min2 = np.argmin(dists, axis=1) - - min1v = np.min(dists, axis=1) - min1f = min2[min1v < 3] - - xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0] - matches = np.intersect1d(min1f, xx) - - missing1 = np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches]) - missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches) - - MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]]) - MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)]) - MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]]) - all_matches = np.concatenate([MN, MN2, MN3], axis=1) - all_matches = torch.tensor(all_matches).unsqueeze(1) - - - ####################################################################### - - if data['skip_train'] == True: # image has no keypoint - continue - - Loss = loss_superglue(scores, all_matches.permute(1, 2, 0)) - #print(Loss) - - if Loss == 0. : continue - - epoch_loss += Loss.item() - - sum_loss += Loss.item() - - Loss.requres_grad = True - - #print(Loss) - - matching.zero_grad() - Loss.backward() - optimizer.step() - - # for every 50 images, print progress and visualize the matches - test = 50 - if (i+1) % test == 0: ############################50 - print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' - .format(epoch, opt.epoch, i+1, numOftrainSet, sum_loss / float(test))) - sum_loss = 0. - - ### eval ### - # Visualize the matches. 
- print('model->eval') ############################################################################ - #matching.eval() - image0, image1 = pred['image0'].cpu().numpy()[0]*255., pred['image1'].cpu().numpy()[0]*255. - - image0, image1 = image0[0], image1[0] - - kpts0, kpts1 = pred['keypoints0'].cpu().numpy()[0], pred['keypoints1'].cpu().numpy()[0] - matches, conf = data['matches0'].cpu().detach().numpy(), data['matching_scores0'].cpu().detach().numpy() - - kpts0, kpts1 = kpts0[0], kpts1[0] - - image0 = read_image_modified(image0, opt.resize, opt.resize_float) - image1 = read_image_modified(image1, opt.resize, opt.resize_float) - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - viz_path = eval_output_dir / '{}_matches.{}'.format(str(i), opt.viz_extension) - color = cm.jet(mconf) - stem = file_name - text = [] - - make_matching_plot( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, - text, viz_path, stem, stem, opt.show_keypoints, - opt.fast_viz, opt.opencv_display, 'Matches') - - print('superglue->train')################################ - #matching.float().train()################################# - # process checkpoint for every 5e3 images - if (i+1) % 1000 == 0: - model_out_path = "indoor_state_dict_epoch{}.pth".format(epoch) - torch.save(matching.state_dict(), model_out_path) - #torch.save(matching, model_out_path) - #torch.save(model_state_dict(),) - print ('Epoch [{}/{}], Step [{}/{}], Checkpoint saved to {} ' - .format(epoch, opt.epoch, i+1, numOftrainSet, model_out_path)) - import sys;sys.exit() - - # save checkpoint when an epoch finishes - epoch_loss /= numOftrainSet - model_out_path = "indoor_state_dict_epoch{}.pth".format(epoch) - torch.save(matching.state_dict(), model_out_path) - #torch.save(matching, model_out_path) - print("Epoch [{}/{}] done. Epoch Loss {}. 
Checkpoint saved to {} " - .format(epoch, opt.epoch, epoch_loss, model_out_path)) - - - ''' - print(pred.keys()) - - print(pred['keypoints0'][0].size()) - print(pred['keypoints1'][0].size()) - print(pred['matches0'].size()) - - print(pred['matches1'].size()) - - - print(pred['matches0'].size()) - print(pred['matches1'].size()) - print(pred['matching_scores0'].size()) - print(pred['matching_scores1'].size()) - - print(pred['matches0']) - - print(pred['matching_scores0']) - ''' \ No newline at end of file