diff --git a/.gitignore b/.gitignore index dec5f00..858d7c7 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,6 @@ venv last_match date_max log.txt + +# PyCharm +.idea/ diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..8274845 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,15 @@ +language: python +dist: xenial +python: + - "3.6" + - "3.7" + +branches: + only: + - master + +before_script: + - pip install tox tox-travis pipenv + +script: + - tox diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000..cede223 --- /dev/null +++ b/Pipfile @@ -0,0 +1,16 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +dota2py = "==0.1.3" +numpy = "==1.15.4" +progressbar = "==2.5" +pymongo = "==2.6.3" +scikit-learn = "==0.20.0" +Flask = "==0.10.1" +matplotlib = "==3.0.1" + +[dev-packages] +pytest = "==4.0.1" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000..c28da27 --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,342 @@ +{ + "_meta": { + "hash": { + "sha256": "2d6ea6df0f5916d99282135eced5301d52f66e1ea48a1fc563ed5366d5a71d5b" + }, + "pipfile-spec": 6, + "requires": {}, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "cycler": { + "hashes": [ + "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", + "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8" + ], + "version": "==0.10.0" + }, + "dota2py": { + "hashes": [ + "sha256:32eed9f8a4fe51d0ee5df7f9b92880d1737eb1a7f34dcfd6a32e87a1487f54d2" + ], + "index": "pypi", + "version": "==0.1.3" + }, + "flask": { + "hashes": [ + "sha256:4c83829ff83d408b5e1d4995472265411d2c414112298f2eb4b359d9e4563373" + ], + "index": "pypi", + "version": "==0.10.1" + }, + "itsdangerous": { + "hashes": [ + "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", + 
"sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" + ], + "version": "==1.1.0" + }, + "jinja2": { + "hashes": [ + "sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd", + "sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4" + ], + "version": "==2.10" + }, + "kiwisolver": { + "hashes": [ + "sha256:0ee4ed8b3ae8f5f712b0aa9ebd2858b5b232f1b9a96b0943dceb34df2a223bc3", + "sha256:0f7f532f3c94e99545a29f4c3f05637f4d2713e7fd91b4dd8abfc18340b86cd5", + "sha256:1a078f5dd7e99317098f0e0d490257fd0349d79363e8c923d5bb76428f318421", + "sha256:1aa0b55a0eb1bd3fa82e704f44fb8f16e26702af1a073cc5030eea399e617b56", + "sha256:2874060b91e131ceeff00574b7c2140749c9355817a4ed498e82a4ffa308ecbc", + "sha256:379d97783ba8d2934d52221c833407f20ca287b36d949b4bba6c75274bcf6363", + "sha256:3b791ddf2aefc56382aadc26ea5b352e86a2921e4e85c31c1f770f527eb06ce4", + "sha256:4329008a167fac233e398e8a600d1b91539dc33c5a3eadee84c0d4b04d4494fa", + "sha256:45813e0873bbb679334a161b28cb9606d9665e70561fd6caa8863e279b5e464b", + "sha256:53a5b27e6b5717bdc0125338a822605084054c80f382051fb945d2c0e6899a20", + "sha256:574f24b9805cb1c72d02b9f7749aa0cc0b81aa82571be5201aa1453190390ae5", + "sha256:66f82819ff47fa67a11540da96966fb9245504b7f496034f534b81cacf333861", + "sha256:79e5fe3ccd5144ae80777e12973027bd2f4f5e3ae8eb286cabe787bed9780138", + "sha256:83410258eb886f3456714eea4d4304db3a1fc8624623fc3f38a487ab36c0f653", + "sha256:8b6a7b596ce1d2a6d93c3562f1178ebd3b7bb445b3b0dd33b09f9255e312a965", + "sha256:9576cb63897fbfa69df60f994082c3f4b8e6adb49cccb60efb2a80a208e6f996", + "sha256:95a25d9f3449046ecbe9065be8f8380c03c56081bc5d41fe0fb964aaa30b2195", + "sha256:a424f048bebc4476620e77f3e4d1f282920cef9bc376ba16d0b8fe97eec87cde", + "sha256:aaec1cfd94f4f3e9a25e144d5b0ed1eb8a9596ec36d7318a504d813412563a85", + "sha256:acb673eecbae089ea3be3dcf75bfe45fc8d4dcdc951e27d8691887963cf421c7", + "sha256:b15bc8d2c2848a4a7c04f76c9b3dc3561e95d4dabc6b4f24bfabe5fd81a0b14f", + 
"sha256:b1c240d565e977d80c0083404c01e4d59c5772c977fae2c483f100567f50847b", + "sha256:c595693de998461bcd49b8d20568c8870b3209b8ea323b2a7b0ea86d85864694", + "sha256:ce3be5d520b4d2c3e5eeb4cd2ef62b9b9ab8ac6b6fedbaa0e39cdb6f50644278", + "sha256:e0f910f84b35c36a3513b96d816e6442ae138862257ae18a0019d2fc67b041dc", + "sha256:ea36e19ac0a483eea239320aef0bd40702404ff8c7e42179a2d9d36c5afcb55c", + "sha256:efabbcd4f406b532206b8801058c8bab9e79645b9880329253ae3322b7b02cd5", + "sha256:f923406e6b32c86309261b8195e24e18b6a8801df0cfc7814ac44017bfcb3939" + ], + "version": "==1.0.1" + }, + "markupsafe": { + "hashes": [ + "sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432", + "sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b", + "sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9", + "sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af", + "sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834", + "sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd", + "sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d", + "sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7", + "sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b", + "sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3", + "sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c", + "sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2", + "sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7", + "sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36", + "sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1", + "sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e", + "sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1", + "sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c", + 
"sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856", + "sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550", + "sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492", + "sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672", + "sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401", + "sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6", + "sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6", + "sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c", + "sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd", + "sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1" + ], + "version": "==1.1.0" + }, + "matplotlib": { + "hashes": [ + "sha256:66a6b7264fb200dd217ebc95c53d59b5e5fa8cac6b8a650a50ed05438667ff32", + "sha256:69ff0d7139f3886be552ff29478c886b461081c0afb3a3ad46afb1a445bae722", + "sha256:70f8782c50ac2c7617aad0fa5ba59fc49f690a851d6afc0178813c49767644dd", + "sha256:716caa55ebfb82d66f7a5584ad818b349998d9cf7e6282e5eda5fdddf4752742", + "sha256:91bf4be2477aa7408131ae1a499b1c8904ea8eb1eb3f88412b4809ebe0698868", + "sha256:d1bd008db1e389d14523345719c30fd0fb3c724b71ae098360c3c8e85b7c560f", + "sha256:d419a5fb5654f620756ad9883bc3f1db6875f6f2760c367bee775357d1bbb38c", + "sha256:dc5b097546eeadc3a91eee35a1dbbf876e78ebed83b934c391f0f14605234c76", + "sha256:de25d893f54e1d50555e4a4babf66d337917499c33c78a24216838b3d2c6bf3b", + "sha256:e4ad891787ad2f181e7582997520a19912990b5d0644b1fdaae365b6699b953f", + "sha256:e69ab0def9b053f4ea5800306ff9c671776a2d151ec6b206465309bb468c0bcc", + "sha256:e9d37b22467e0e4d6f989892a998db5f59ddbf3ab811b515585dfdde9aacc5f9", + "sha256:ee4471dd1c5ed03f2f46149af351b7a2e6618eced329660f1b4b8bf573422b70" + ], + "index": "pypi", + "version": "==3.0.1" + }, + "numpy": { + "hashes": [ + "sha256:0df89ca13c25eaa1621a3f09af4c8ba20da849692dcae184cb55e80952c453fb", + 
"sha256:154c35f195fd3e1fad2569930ca51907057ae35e03938f89a8aedae91dd1b7c7", + "sha256:18e84323cdb8de3325e741a7a8dd4a82db74fde363dce32b625324c7b32aa6d7", + "sha256:1e8956c37fc138d65ded2d96ab3949bd49038cc6e8a4494b1515b0ba88c91565", + "sha256:23557bdbca3ccbde3abaa12a6e82299bc92d2b9139011f8c16ca1bb8c75d1e95", + "sha256:24fd645a5e5d224aa6e39d93e4a722fafa9160154f296fd5ef9580191c755053", + "sha256:36e36b6868e4440760d4b9b44587ea1dc1f06532858d10abba98e851e154ca70", + "sha256:3d734559db35aa3697dadcea492a423118c5c55d176da2f3be9c98d4803fc2a7", + "sha256:416a2070acf3a2b5d586f9a6507bb97e33574df5bd7508ea970bbf4fc563fa52", + "sha256:4a22dc3f5221a644dfe4a63bf990052cc674ef12a157b1056969079985c92816", + "sha256:4d8d3e5aa6087490912c14a3c10fbdd380b40b421c13920ff468163bc50e016f", + "sha256:4f41fd159fba1245e1958a99d349df49c616b133636e0cf668f169bce2aeac2d", + "sha256:561ef098c50f91fbac2cc9305b68c915e9eb915a74d9038ecf8af274d748f76f", + "sha256:56994e14b386b5c0a9b875a76d22d707b315fa037affc7819cda08b6d0489756", + "sha256:73a1f2a529604c50c262179fcca59c87a05ff4614fe8a15c186934d84d09d9a5", + "sha256:7da99445fd890206bfcc7419f79871ba8e73d9d9e6b82fe09980bc5bb4efc35f", + "sha256:99d59e0bcadac4aa3280616591fb7bcd560e2218f5e31d5223a2e12a1425d495", + "sha256:a4cc09489843c70b22e8373ca3dfa52b3fab778b57cf81462f1203b0852e95e3", + "sha256:a61dc29cfca9831a03442a21d4b5fd77e3067beca4b5f81f1a89a04a71cf93fa", + "sha256:b1853df739b32fa913cc59ad9137caa9cc3d97ff871e2bbd89c2a2a1d4a69451", + "sha256:b1f44c335532c0581b77491b7715a871d0dd72e97487ac0f57337ccf3ab3469b", + "sha256:b261e0cb0d6faa8fd6863af26d30351fd2ffdb15b82e51e81e96b9e9e2e7ba16", + "sha256:c857ae5dba375ea26a6228f98c195fec0898a0fd91bcf0e8a0cae6d9faf3eca7", + "sha256:cf5bb4a7d53a71bb6a0144d31df784a973b36d8687d615ef6a7e9b1809917a9b", + "sha256:db9814ff0457b46f2e1d494c1efa4111ca089e08c8b983635ebffb9c1573361f", + "sha256:df04f4bad8a359daa2ff74f8108ea051670cafbca533bb2636c58b16e962989e", + "sha256:ecf81720934a0e18526177e645cbd6a8a21bb0ddc887ff9738de07a1df5c6b61", 
+ "sha256:edfa6fba9157e0e3be0f40168eb142511012683ac3dc82420bee4a3f3981b30e" + ], + "index": "pypi", + "version": "==1.15.4" + }, + "progressbar": { + "hashes": [ + "sha256:5d81cb529da2e223b53962afd6c8ca0f05c6670e40309a7219eacc36af9b6c63" + ], + "index": "pypi", + "version": "==2.5" + }, + "pymongo": { + "hashes": [ + "sha256:cabe1d785ad5db6ed8ff70dcb9c987958fc75400f066ec78911ca3f37184a4e2" + ], + "index": "pypi", + "version": "==2.6.3" + }, + "pyparsing": { + "hashes": [ + "sha256:40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b", + "sha256:f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592" + ], + "version": "==2.3.0" + }, + "python-dateutil": { + "hashes": [ + "sha256:063df5763652e21de43de7d9e00ccf239f953a832941e37be541614732cdfc93", + "sha256:88f9287c0174266bb0d8cedd395cfba9c58e87e5ad86b2ce58859bc11be3cf02" + ], + "version": "==2.7.5" + }, + "scikit-learn": { + "hashes": [ + "sha256:1ca280bbdeb0f9950f9427c71e29d9f14e63b2ffa3e8fdf95f25e13773e6d898", + "sha256:33ad23aa0928c64567a24aac771aea4e179fab2a20f9f786ab00ca9fe0a13c82", + "sha256:344bc433ccbfbadcac8c16b4cec9d7c4722bcea9ce19f6da42e2c2f805571941", + "sha256:35ee532b5e992a6e8d8a71d325fd9e0b58716894657e7d3da3e7a1d888c2e7d4", + "sha256:37cbbba2d2a3895bba834d50488d22268a511279e053135bb291f637fe30512b", + "sha256:40cf1908ee712545f4286cc21f3ee21f3466c81438320204725ab37c96849f27", + "sha256:4130760ac54f5946523c1a1fb32a6c0925e5245f77285270a8f6fb5901b7b733", + "sha256:46cc8c32496f02affde7abe507af99cd752de0e41aec951a0bc40c693c2a1e07", + "sha256:4a364cf22be381a17c05ada9f9ce102733a0f75893c51b83718cd9358444921e", + "sha256:56aff3fa3417cd69807c1c74db69aee34ce08d7161cbdfebbff9b4023d9d224b", + "sha256:58debb34a15cfc03f4876e450068dbd711d9ec36ae5503ed2868f2c1f88522f7", + "sha256:7bcf7ade62ef3443470af32afb82646640d653f42502cf31a13cc17d3ff85d57", + "sha256:7d4eab203ed260075f47e2bf6a2bd656367e4e8683b3ad46d4651070c5d1e9aa", + "sha256:86697c6e4c2d74fbbf110c6d5979d34196a55108fa9896bf424f9795a8d935ad", + 
"sha256:911115db6669c9b11efd502dcc5483cd0c53e4e3c4bcdfe2e73bbb27eb5e81da", + "sha256:97d1d971f8ec257011e64b7d655df68081dd3097322690afa1a71a1d755f8c18", + "sha256:99f22c3228ec9ab3933597825dc7d595b6c8c7b9ae725cfa557f16353fac8314", + "sha256:a2e18e5a4095b3ca4852eb087d28335f3bb8515df4ccf906d380ee627613837f", + "sha256:a3070f71a4479a9827148609f24f2978f10acffa3b8012fe9606720d271066bd", + "sha256:a6a197499429d2eaa2ae922760aa3966ef353545422d5f47ea2ca9369cbf7d26", + "sha256:a7f6f5b3bc7b8e2066076098788579af12bd507ccea8ca6859e52761aa61eaca", + "sha256:a82b90b6037fcc6b311431395c11b02555a3fbf96921a0667c8f8b0c495991cb", + "sha256:ab2c4266b8cd159a266eb03c709ad5400756dca9c45aa48fb523263344475093", + "sha256:b983a2dfdb9d707c78790608bcfd63692e5c2d996865a9689f3db768d0a2978d", + "sha256:bb33d447f4c6fb164d426467d7bf8a4901c303333c5809b85319b2e0626763cd", + "sha256:bc2a0116a67081167f1fbfed731d361671e5925db291b70e65fa66170045c53f", + "sha256:bd189f6d0c2fdccb7c0d3fd1227c6626dc17d00257edbb63dd7c88f31928db61", + "sha256:d393f810da9cd4746cad7350fb89f0509c3ae702c79d2ba8bd875201be4102d1" + ], + "index": "pypi", + "version": "==0.20.0" + }, + "scipy": { + "hashes": [ + "sha256:0611ee97296265af4a21164a5323f8c1b4e8e15c582d3dfa7610825900136bb7", + "sha256:08237eda23fd8e4e54838258b124f1cd141379a5f281b0a234ca99b38918c07a", + "sha256:0e645dbfc03f279e1946cf07c9c754c2a1859cb4a41c5f70b25f6b3a586b6dbd", + "sha256:0e9bb7efe5f051ea7212555b290e784b82f21ffd0f655405ac4f87e288b730b3", + "sha256:108c16640849e5827e7d51023efb3bd79244098c3f21e4897a1007720cb7ce37", + "sha256:340ef70f5b0f4e2b4b43c8c8061165911bc6b2ad16f8de85d9774545e2c47463", + "sha256:3ad73dfc6f82e494195144bd3a129c7241e761179b7cb5c07b9a0ede99c686f3", + "sha256:3b243c77a822cd034dad53058d7c2abf80062aa6f4a32e9799c95d6391558631", + "sha256:404a00314e85eca9d46b80929571b938e97a143b4f2ddc2b2b3c91a4c4ead9c5", + "sha256:423b3ff76957d29d1cce1bc0d62ebaf9a3fdfaf62344e3fdec14619bb7b5ad3a", + 
"sha256:42d9149a2fff7affdd352d157fa5717033767857c11bd55aa4a519a44343dfef", + "sha256:625f25a6b7d795e8830cb70439453c9f163e6870e710ec99eba5722775b318f3", + "sha256:698c6409da58686f2df3d6f815491fd5b4c2de6817a45379517c92366eea208f", + "sha256:729f8f8363d32cebcb946de278324ab43d28096f36593be6281ca1ee86ce6559", + "sha256:8190770146a4c8ed5d330d5b5ad1c76251c63349d25c96b3094875b930c44692", + "sha256:878352408424dffaa695ffedf2f9f92844e116686923ed9aa8626fc30d32cfd1", + "sha256:8b984f0821577d889f3c7ca8445564175fb4ac7c7f9659b7c60bef95b2b70e76", + "sha256:8f841bbc21d3dad2111a94c490fb0a591b8612ffea86b8e5571746ae76a3deac", + "sha256:c22b27371b3866c92796e5d7907e914f0e58a36d3222c5d436ddd3f0e354227a", + "sha256:d0cdd5658b49a722783b8b4f61a6f1f9c75042d0e29a30ccb6cacc9b25f6d9e2", + "sha256:d40dc7f494b06dcee0d303e51a00451b2da6119acbeaccf8369f2d29e28917ac", + "sha256:d8491d4784aceb1f100ddb8e31239c54e4afab8d607928a9f7ef2469ec35ae01", + "sha256:dfc5080c38dde3f43d8fbb9c0539a7839683475226cf83e4b24363b227dfe552", + "sha256:e24e22c8d98d3c704bb3410bce9b69e122a8de487ad3dbfe9985d154e5c03a40", + "sha256:e7a01e53163818d56eabddcafdc2090e9daba178aad05516b20c6591c4811020", + "sha256:ee677635393414930541a096fc8e61634304bb0153e4e02b75685b11eba14cae", + "sha256:f0521af1b722265d824d6ad055acfe9bd3341765735c44b5a4d0069e189a0f40", + "sha256:f25c281f12c0da726c6ed00535ca5d1622ec755c30a3f8eafef26cf43fede694" + ], + "version": "==1.1.0" + }, + "six": { + "hashes": [ + "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", + "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" + ], + "version": "==1.11.0" + }, + "werkzeug": { + "hashes": [ + "sha256:c3fd7a7d41976d9f44db327260e263132466836cef6f91512889ed60ad26557c", + "sha256:d5da73735293558eb1651ee2fddc4d0dedcfa06538b8813a2e20011583c9e49b" + ], + "version": "==0.14.1" + } + }, + "develop": { + "atomicwrites": { + "hashes": [ + "sha256:0312ad34fcad8fac3704d441f7b317e50af620823353ec657a53e981f92920c0", + 
"sha256:ec9ae8adaae229e4f8446952d204a3e4b5fdd2d099f9be3aaf556120135fb3ee" + ], + "version": "==1.2.1" + }, + "attrs": { + "hashes": [ + "sha256:10cbf6e27dbce8c30807caf056c8eb50917e0eaafe86347671b57254006c3e69", + "sha256:ca4be454458f9dec299268d472aaa5a11f67a4ff70093396e1ceae9c76cf4bbb" + ], + "version": "==18.2.0" + }, + "colorama": { + "hashes": [ + "sha256:a3d89af5db9e9806a779a50296b5fdb466e281147c2c235e8225ecc6dbf7bbf3", + "sha256:c9b54bebe91a6a803e0772c8561d53f2926bfeb17cd141fbabcb08424086595c" + ], + "markers": "sys_platform == 'win32'", + "version": "==0.4.0" + }, + "more-itertools": { + "hashes": [ + "sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092", + "sha256:c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e", + "sha256:fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d" + ], + "version": "==4.3.0" + }, + "pluggy": { + "hashes": [ + "sha256:447ba94990e8014ee25ec853339faf7b0fc8050cdc3289d4d71f7f410fb90095", + "sha256:bde19360a8ec4dfd8a20dcb811780a30998101f078fc7ded6162f0076f50508f" + ], + "version": "==0.8.0" + }, + "py": { + "hashes": [ + "sha256:bf92637198836372b520efcba9e020c330123be8ce527e535d185ed4b6f45694", + "sha256:e76826342cefe3c3d5f7e8ee4316b80d1dd8a300781612ddbc765c17ba25a6c6" + ], + "version": "==1.7.0" + }, + "pytest": { + "hashes": [ + "sha256:1d131cc532be0023ef8ae265e2a779938d0619bb6c2510f52987ffcba7fa1ee4", + "sha256:ca4761407f1acc85ffd1609f464ca20bb71a767803505bd4127d0e45c5a50e23" + ], + "index": "pypi", + "version": "==4.0.1" + }, + "six": { + "hashes": [ + "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", + "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" + ], + "version": "==1.11.0" + } + } +} diff --git a/README.md b/README.md index 5753144..bacba01 100644 --- a/README.md +++ b/README.md @@ -21,15 +21,9 @@ Everything has been tested to work on Mac OSX 10.8. 
To download our project and ### Dependencies -#### VirtualEnv +#### pipenv -We use [VirtualEnv](http://www.virtualenv.org/en/latest/) to help facilitate getting setup on a new machine. There are [a number of ways of installing it](http://www.virtualenv.org/en/latest/virtualenv.html#installation), depending on your operating system. - -#### GFortran - -[GFortran](http://gcc.gnu.org/wiki/GFortranBinaries) is required to install scipy. If you're running Mac OSX, we recommend using [Homebrew](http://brew.sh/) to install GFortran via its gcc formula: - - brew install gcc +We use [pipenv](https://pipenv.readthedocs.io/en/latest/) to help facilitate getting setup on a new machine. There are [a number of ways of installing it](https://pipenv.readthedocs.io/en/latest/#install-pipenv-today), depending on your operating system. #### MongoDB, Database Backup, and Environment Variables (optional for just running recommendation engine) @@ -50,26 +44,18 @@ You may find it helpful to add these commands to your bash profile in your home git clone git@github.com:kevincon/dotaml.git -### Initialize VirtualEnv - -From inside the repository root folder, initialize VirtualEnv by running: +### Initialize pipenv - virtualenv venv +From inside the repository root folder, initialize pipenv by running: -This creates a new folder in the directory called "venv." You only need to do this once. Don't worry about ever accidentally adding this folder to the repository. There's an entry for it in the .gitignore file. + pipenv install -Next, activate the VirtualEnv by running: +Next, activate the pipenv by running: - source venv/bin/activate + pipenv shell -You should now see "(venv)" as part of your terminal prompt, indicating you are now inside your VirtualEnv. Note that closing the terminal window deactivates VirtualEnv, so you must run ```source venv/bin/activate``` each time you open a new terminal window for development. 
+You should now see "(dotaml)" as part of your terminal prompt, indicating you are now inside your virtual environment. Note that closing the terminal window deactivates the virtual environment, so you must run `pipenv shell` each time you open a new terminal window for development. -### Installing required packages - -Now that you're in VirtualEnv, run the following command to automatically install all of the Python modules that are required: - - pip install -r requirements.txt - ### Running the web app From the root folder of the project, run: @@ -94,7 +80,7 @@ Feel free to submit a pull request if you are interested in continuing developme ``` The MIT License (MIT) -Copyright (c) 2015 Kevin Conley +Copyright (c) 2015-2018 Kevin Conley Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/app.py b/app.py index e23e6e6..2f442ac 100644 --- a/app.py +++ b/app.py @@ -11,7 +11,7 @@ #engine = Engine(D2LogisticRegression()) def get_api_string(recommendations, prob): - recommendations = map(str, recommendations) + recommendations = list(map(str, recommendations)) return json.dumps({'x': recommendations, 'prob_x': prob}) @app.route("/") @@ -26,13 +26,13 @@ def api(): if len(my_team) == 1 and my_team[0] == '': my_team = [] else: - my_team = map(int, my_team) + my_team = list(map(int, my_team)) their_team = request.args['y'].split(',') if len(their_team) == 1 and their_team[0] == '': their_team = [] else: - their_team = map(int, their_team) + their_team = list(map(int, their_team)) prob_recommendation_pairs = engine.recommend(my_team, their_team) recommendations = [hero for prob, hero in prob_recommendation_pairs] diff --git a/data_collection/dotabot.py b/data_collection/dotabot.py index 18051dc..2961751 100644 --- a/data_collection/dotabot.py +++ b/data_collection/dotabot.py @@ -133,7 +133,7 @@ def main(start_match_id): saved_id = int(f.readline()) ans = 
False try: - ans = raw_input('Start at last_match %d? ' % saved_id) + ans = input(f'Start at last_match {saved_id}? ') if ans in ['yes', 'y', 'Y', 'YES', 'Yes']: ans = True except KeyboardInterrupt: @@ -144,12 +144,12 @@ def main(start_match_id): date_max = int(d.readline()) match_id = saved_id except IOError: - print 'Could not open date_max file, ignoring last_match value.' + print('Could not open date_max file, ignoring last_match value.') except IOError: pass - print 'OK, starting at match_id=%s' % match_id + print(f'OK, starting at match_id={match_id}') setup() main(match_id) diff --git a/data_collection/dotabot2.py b/data_collection/dotabot2.py index fc08a10..bde8a0b 100644 --- a/data_collection/dotabot2.py +++ b/data_collection/dotabot2.py @@ -9,7 +9,7 @@ db = client[os.getenv('DOTABOT_DB_NAME', 'dotabot')] match_collection = db.matches -logging.basicConfig(filename='/home/kcon/dota2project/log.txt') +logging.basicConfig(filename='log.txt') logger = logging.getLogger('dotabot') def setup(): diff --git a/data_collection/util.py b/data_collection/util.py index 9305c9a..6c54a1f 100644 --- a/data_collection/util.py +++ b/data_collection/util.py @@ -1,6 +1,6 @@ import smtplib, os from email.mime.text import MIMEText -from email.Utils import formatdate +from email.utils import formatdate from datetime import datetime from dota2py import data @@ -9,7 +9,7 @@ def print_match_history(gmh_result): for match in gmh_result['matches']: match_id = match['match_id'] start_time = datetime.fromtimestamp(int(match['start_time'])) - print 'Match %d - %s' % (match_id, start_time) + print(f'Match {match_id} - {start_time}') def get_game_mode_string(game_mode_id): '''Return a human-readable string for a game_mode id.''' @@ -20,8 +20,12 @@ def get_game_mode_string(game_mode_id): def send_email(body, subject='Quick Message From DOTA2 Python Script', - recipients=['kcon@stanford.edu', 'djperry@stanford.edu']): + recipients=None): '''Send an email.''' + + if not recipients: + 
recipients = ['kcon@stanford.edu', 'djperry@stanford.edu'] + # Credentials username = os.getenv('DOTABOT_USERNAME') hostname = os.getenv('DOTABOT_HOSTNAME') diff --git a/engine.py b/engine.py index b1cbfea..0fcf0b9 100644 --- a/engine.py +++ b/engine.py @@ -17,13 +17,13 @@ def main(): my_team = [76, 54] their_team = [5, 15, 46, 91, 13] - print 'My Team: %s' % [get_hero_human_readable(hero_id) for hero_id in my_team] - print 'Their Team: %s' % [get_hero_human_readable(hero_id) for hero_id in their_team] - print 'Recommend:' + print(f'My Team: {[get_hero_human_readable(hero_id) for hero_id in my_team]}') + print(f'Their Team: {[get_hero_human_readable(hero_id) for hero_id in their_team]}') + print('Recommend:') #engine = Engine(D2KNearestNeighbors()) engine = Engine(D2LogisticRegression()) recommendations = engine.recommend(my_team, their_team) - print [(prob, get_hero_human_readable(hero)) for prob, hero in recommendations] + print([(prob, get_hero_human_readable(hero)) for prob, hero in recommendations]) class Engine: def __init__(self, algorithm): diff --git a/k_nearest_neighbors/evaluate_model_10000.pkl b/k_nearest_neighbors/evaluate_model_10000.pkl index 28f18e9..3d93339 100644 Binary files a/k_nearest_neighbors/evaluate_model_10000.pkl and b/k_nearest_neighbors/evaluate_model_10000.pkl differ diff --git a/k_nearest_neighbors/evaluate_model_51022.pkl b/k_nearest_neighbors/evaluate_model_51022.pkl new file mode 100644 index 0000000..a20ff2c Binary files /dev/null and b/k_nearest_neighbors/evaluate_model_51022.pkl differ diff --git a/k_nearest_neighbors/evaluate_model_51022.tar.gz b/k_nearest_neighbors/evaluate_model_51022.tar.gz deleted file mode 100644 index d7881c8..0000000 Binary files a/k_nearest_neighbors/evaluate_model_51022.tar.gz and /dev/null differ diff --git a/k_nearest_neighbors/k_nearest_neighbors.py b/k_nearest_neighbors/k_nearest_neighbors.py index a084039..361ac5d 100644 --- a/k_nearest_neighbors/k_nearest_neighbors.py +++ 
b/k_nearest_neighbors/k_nearest_neighbors.py @@ -29,9 +29,9 @@ def __init__(self, model_root='k_nearest_neighbors'): recommend_path = os.path.join(model_root, 'recommend_models_%d.pkl' % TRAINING_SET_SIZE) evaluate_path = os.path.join(model_root, 'evaluate_model_%d.pkl' % TRAINING_SET_SIZE) - with open(recommend_path, 'r') as input_file: + with open(recommend_path, 'rb') as input_file: self.recommend_models = pickle.load(input_file) - with open(evaluate_path, 'r') as input_file: + with open(evaluate_path, 'rb') as input_file: self.evaluate_model = pickle.load(input_file) def transform(self, my_team, their_team): @@ -40,7 +40,7 @@ def transform(self, my_team, their_team): X[hero_id - 1] = 1 for hero_id in their_team: X[hero_id - 1 + NUM_HEROES] = 1 - return X + return X.reshape(1, -1) def recommend(self, my_team, their_team, hero_candidates): '''Returns a list of (hero, probablility of winning with hero added) recommended to complete my_team.''' @@ -62,7 +62,11 @@ def recommend(self, my_team, their_team, hero_candidates): def score(self, query): '''Score the query using the evaluation model, considering both radiant and dire teams.''' radiant_query = query - dire_query = np.concatenate((radiant_query[NUM_HEROES:NUM_FEATURES], radiant_query[0:NUM_HEROES])) + + underlying_features = query[0] + dire_features = np.concatenate((underlying_features[NUM_HEROES:NUM_FEATURES], underlying_features[0:NUM_HEROES])) + dire_query = dire_features.reshape(1, -1) + rad_prob = self.evaluate_model.predict_proba(radiant_query)[0][1] dire_prob = self.evaluate_model.predict_proba(dire_query)[0][0] return (rad_prob + dire_prob) / 2 diff --git a/k_nearest_neighbors/kfcv_d.py b/k_nearest_neighbors/kfcv_d.py index c36c10c..3f5dd04 100644 --- a/k_nearest_neighbors/kfcv_d.py +++ b/k_nearest_neighbors/kfcv_d.py @@ -1,5 +1,5 @@ from sklearn.neighbors import KNeighborsClassifier -from sklearn import cross_validation +from sklearn.model_selection import cross_val_score, KFold import numpy as np 
from progressbar import ProgressBar, Bar, Percentage, FormatLabel, ETA @@ -17,18 +17,23 @@ def poly_weights(distances): def score(estimator, X, y): global pbar, FOLDS_FINISHED correct_predictions = 0 - for i, radiant_query in enumerate(X): + for i, radiant_features in enumerate(X): pbar.update(FOLDS_FINISHED) - dire_query = np.concatenate((radiant_query[NUM_HEROES:NUM_FEATURES], radiant_query[0:NUM_HEROES])) + + radiant_query = radiant_features.reshape(1, -1) rad_prob = estimator.predict_proba(radiant_query)[0][1] + + dire_features = np.concatenate((radiant_features[NUM_HEROES:NUM_FEATURES], radiant_features[0:NUM_HEROES])) + dire_query = dire_features.reshape(1, -1) dire_prob = estimator.predict_proba(dire_query)[0][0] + overall_prob = (rad_prob + dire_prob) / 2 prediction = 1 if (overall_prob > 0.5) else -1 result = 1 if prediction == y[i] else 0 correct_predictions += result FOLDS_FINISHED += 1 accuracy = float(correct_predictions) / len(X) - print 'Accuracy: %f' % accuracy + print(f'Accuracy: {accuracy}') return accuracy NUM_HEROES = 108 @@ -45,9 +50,8 @@ def score(estimator, X, y): X = X[0:NUM_MATCHES] Y = Y[0:NUM_MATCHES] -print 'Training using data from %d matches...' 
% NUM_MATCHES - -k_fold = cross_validation.KFold(n=NUM_MATCHES, n_folds=K, indices=True) +print(f'Training using data from {NUM_MATCHES} matches...') +kfold = KFold(K) d_tries = [3, 4, 5] @@ -56,9 +60,9 @@ def score(estimator, X, y): d_accuracy_pairs = [] for d_index, d in enumerate(d_tries): - model = KNeighborsClassifier(n_neighbors=NUM_MATCHES/K,metric=my_distance,weights=poly_param(d)) - model_accuracies = cross_validation.cross_val_score(model, X, Y, scoring=score, cv=k_fold) + model = KNeighborsClassifier(n_neighbors=NUM_MATCHES//K,metric=my_distance,weights=poly_param(d)) + model_accuracies = cross_val_score(model, X, Y, scoring=score, cv=kfold) model_accuracy = model_accuracies.mean() d_accuracy_pairs.append((d, model_accuracy)) pbar.finish() -print d_accuracy_pairs +print(d_accuracy_pairs) diff --git a/k_nearest_neighbors/preprocess.py b/k_nearest_neighbors/preprocess.py index 0bd84b3..35d3efd 100644 --- a/k_nearest_neighbors/preprocess.py +++ b/k_nearest_neighbors/preprocess.py @@ -49,7 +49,7 @@ pbar.finish() -print "Permuting, generating train and test sets." +print("Permuting, generating train and test sets.") indices = np.random.permutation(NUM_MATCHES) test_indices = indices[0:NUM_MATCHES/10] train_indices = indices[NUM_MATCHES/10:NUM_MATCHES] @@ -60,7 +60,7 @@ X_train = X[train_indices] Y_train = Y[train_indices] -print "Saving output file now..." 
+print("Saving output file now...") np.savez_compressed('test_%d.npz' % len(test_indices), X=X_test, Y=Y_test) np.savez_compressed('train_%d.npz' % len(train_indices), X=X_train, Y=Y_train) diff --git a/k_nearest_neighbors/recommend_models_10000.pkl b/k_nearest_neighbors/recommend_models_10000.pkl index a6e11de..1df4cc8 100644 Binary files a/k_nearest_neighbors/recommend_models_10000.pkl and b/k_nearest_neighbors/recommend_models_10000.pkl differ diff --git a/k_nearest_neighbors/test.py b/k_nearest_neighbors/test.py index 02520e9..8657993 100644 --- a/k_nearest_neighbors/test.py +++ b/k_nearest_neighbors/test.py @@ -25,7 +25,7 @@ def poly_weights_evaluate(distances): return np.array([weights]) def test(): - with open('evaluate_model_51022.pkl', 'r') as input_file: + with open('evaluate_model_51022.pkl', 'rb') as input_file: model = pickle.load(input_file) widgets = [FormatLabel('Processed: %(value)d/%(max)d matches. '), ETA(), Percentage(), ' ', Bar()] @@ -33,11 +33,16 @@ def test(): correct_predictions = 0 Y_pred = np.zeros(NUM_MATCHES) - for i, radiant_query in enumerate(X): + for i, radiant_features in enumerate(X): pbar.update(i) - dire_query = np.concatenate((radiant_query[NUM_HEROES:NUM_FEATURES], radiant_query[0:NUM_HEROES])) + + radiant_query = radiant_features.reshape(1, -1) rad_prob = model.predict_proba(radiant_query)[0][1] + + dire_features = np.concatenate((radiant_features[NUM_HEROES:NUM_FEATURES], radiant_features[0:NUM_HEROES])) + dire_query = dire_features.reshape(1, -1) dire_prob = model.predict_proba(dire_query)[0][0] + overall_prob = (rad_prob + dire_prob) / 2 prediction = 1 if (overall_prob > 0.5) else -1 Y_pred[i] = 1 if prediction == 1 else 0 @@ -49,24 +54,22 @@ def test(): pbar.finish() accuracy = float(correct_predictions) / NUM_MATCHES - print 'Accuracy of KNN model: %f' % accuracy + print(f'Accuracy of KNN model: {accuracy}') # flip all -1 true labels to 0 for f1 scoring for i, match in enumerate(Y): if match == -1: Y[i] = 0 - prec, 
recall, f1, support = precision_recall_fscore_support(Y, Y_pred, average='macro') - print 'Precision: ',prec - print 'Recall: ',recall - print 'F1 Score: ',f1 - print 'Support: ',support + prec, recall, f1, _ = precision_recall_fscore_support(Y, Y_pred, average='binary') + print('Precision: ',prec) + print('Recall: ',recall) + print('F1 Score: ',f1) # Accuracy of KNN model: 0.678074 # Precision: 0.764119601329 # Recall: 0.673499267936 # F1 Score: 0.715953307393 - # Support: 3415 if __name__ == '__main__': test() diff --git a/k_nearest_neighbors/train_evaluate.py b/k_nearest_neighbors/train_evaluate.py index 58670de..e5f7d5c 100644 --- a/k_nearest_neighbors/train_evaluate.py +++ b/k_nearest_neighbors/train_evaluate.py @@ -7,7 +7,7 @@ X = preprocessed['X'] Y = preprocessed['Y'] -relevant_indices = range(0, 10000) +relevant_indices = slice(0, 10000) X = X[relevant_indices] Y = Y[relevant_indices] @@ -23,10 +23,10 @@ def poly_weights_evaluate(distances): NUM_HEROES = 108 NUM_MATCHES = len(X) -print 'Training evaluation model using data from %d matches...' 
% NUM_MATCHES +print(f'Training evaluation model using data from {NUM_MATCHES} matches...') model = KNeighborsClassifier(n_neighbors=NUM_MATCHES,metric=my_distance,weights=poly_weights_evaluate).fit(X, Y) # Populate model pickle -with open('evaluate_model_%d.pkl' % NUM_MATCHES, 'w') as output_file: +with open('evaluate_model_%d.pkl' % NUM_MATCHES, 'wb') as output_file: pickle.dump(model, output_file) diff --git a/k_nearest_neighbors/train_recommend.py b/k_nearest_neighbors/train_recommend.py index 07efecb..a80c6ac 100644 --- a/k_nearest_neighbors/train_recommend.py +++ b/k_nearest_neighbors/train_recommend.py @@ -7,7 +7,7 @@ X = preprocessed['X'] Y = preprocessed['Y'] -relevant_indices = range(0, 10000) +relevant_indices = slice(0, 10000) X = X[relevant_indices] Y = Y[relevant_indices] @@ -23,7 +23,7 @@ def poly_weights_recommend(distances): NUM_HEROES = 108 NUM_MATCHES = len(X) -print 'Training recommendation models using data from %d matches...' % NUM_MATCHES +print(f'Training recommendation models using data from {NUM_MATCHES} matches...') models = [] @@ -42,8 +42,8 @@ def poly_weights_recommend(distances): Y_filtered = np.array(Y_filtered) try: models.append(KNeighborsClassifier(n_neighbors=len(X_filtered),metric=my_distance,weights=poly_weights_recommend).fit(X_filtered, Y_filtered)) - except Exception,e: - print "Radiant fit error!!! %s" % e + except Exception as e: + print(f"Radiant fit error!!! {e}") # Dire Loop for hero_id in range(1, 109): @@ -60,9 +60,9 @@ def poly_weights_recommend(distances): Y_filtered = np.array(Y_filtered) try: models.append(KNeighborsClassifier(n_neighbors=len(X_filtered),metric=my_distance,weights=poly_weights_recommend).fit(X_filtered, Y_filtered)) - except Exception,e: - print "Dire fit error!!! %s" % e + except Exception as e: + print(f"Dire fit error!!! 
{e}") # Populate model pickle -with open('recommend_models_%d.pkl' % NUM_MATCHES, 'w') as output_file: +with open('recommend_models_%d.pkl' % NUM_MATCHES, 'wb') as output_file: pickle.dump(models, output_file) diff --git a/logistic_regression/f1score.py b/logistic_regression/f1score.py index ff298b2..285d640 100644 --- a/logistic_regression/f1score.py +++ b/logistic_regression/f1score.py @@ -1,33 +1,24 @@ import numpy as np from sklearn.metrics import precision_recall_fscore_support -from logistic_regression import D2LogisticRegression +from .logistic_regression import D2LogisticRegression POSITIVE_LABEL = 1 NEGATIVE_LABEL = 0 + def make_prediction(algo, query): - prob = algo.score(query) + prob = algo.score(query.reshape(1, -1)) return POSITIVE_LABEL if prob > 0.5 else NEGATIVE_LABEL -algo = D2LogisticRegression(model_root='.') - -testing_data = np.load('test_5669.npz') -X = testing_data['X'] -Y_true = testing_data['Y'] -num_matches = len(Y_true) - -Y_pred = np.zeros(num_matches) -for i, match in enumerate(X): - Y_pred[i] = make_prediction(algo, match) - -prec, recall, f1, support = precision_recall_fscore_support(Y_true, Y_pred, average='macro') - -print 'Precision: ',prec -print 'Recall: ',recall -print 'F1 Score: ',f1 -print 'Support: ',support -# Precision: 0.781616907078 -# Recall: 0.68468997943 -# F1 Score: 0.729949874687 -# Support: 3403 +def calculate_precision_recall_fscore(): + algo = D2LogisticRegression() + testing_data = np.load('logistic_regression/test_5669.npz') + X = testing_data['X'] + Y_true = testing_data['Y'] + num_matches = len(Y_true) + Y_pred = np.zeros(num_matches) + for i, match in enumerate(X): + Y_pred[i] = make_prediction(algo, match) + prec, recall, f1, _ = precision_recall_fscore_support(Y_true, Y_pred, average='binary') + return prec, recall, f1 diff --git a/logistic_regression/learning_curve.py b/logistic_regression/learning_curve.py index 2f1b321..baa3d17 100644 --- a/logistic_regression/learning_curve.py +++ 
b/logistic_regression/learning_curve.py @@ -6,11 +6,15 @@ NUM_HEROES = 108 NUM_FEATURES = NUM_HEROES * 2 -def score(model, radiant_query): +def score(model, radiant_features): '''Return the probability of the query being in the positive class.''' - dire_query = np.concatenate((radiant_query[NUM_HEROES:NUM_FEATURES], radiant_query[0:NUM_HEROES])) + radiant_query = radiant_features.reshape(1, -1) rad_prob = model.predict_proba(radiant_query)[0][1] + + dire_features = np.concatenate((radiant_features[NUM_HEROES:NUM_FEATURES], radiant_features[0:NUM_HEROES])) + dire_query = dire_features.reshape(1, -1) dire_prob = model.predict_proba(dire_query)[0][0] + return (rad_prob + dire_prob) / 2 def evaluate(model, X, Y, positive_class, negative_class): @@ -35,7 +39,7 @@ def plot_learning_curve(num_points, X_train, Y_train, X_test, Y_test, positive_c model = train(X_train, Y_train, training_set_size) accuracy = evaluate(model, X_test, Y_test, positive_class, negative_class) accuracies.append(accuracy) - print 'Accuracy for %d training examples: %f' % (training_set_size, accuracy) + print(f'Accuracy for {training_set_size} training examples: {accuracy}') plt.plot(np.array(training_set_sizes), np.array(accuracies)) plt.ylabel('Accuracy') @@ -47,7 +51,7 @@ def plot_learning_curves(num_points, X_train, Y_train, X_test, Y_test, positive_ total_num_matches = len(X_train) training_set_sizes = [] for div in list(reversed(range(1, num_points + 1))): - training_set_sizes.append(total_num_matches / div) + training_set_sizes.append(total_num_matches // div) test_errors = [] training_errors = [] @@ -75,7 +79,6 @@ def main(): X_test = testing_data['X'] Y_test = testing_data['Y'] - #plot_learning_curve(30, X_train, Y_train, X_test, Y_test) plot_learning_curves(100, X_train, Y_train, X_test, Y_test) if __name__ == '__main__': diff --git a/logistic_regression/logistic_regression.py b/logistic_regression/logistic_regression.py index 86e2e6a..5ada946 100644 --- 
a/logistic_regression/logistic_regression.py +++ b/logistic_regression/logistic_regression.py @@ -7,7 +7,7 @@ class D2LogisticRegression: def __init__(self, model_root='logistic_regression'): model_path = os.path.join(model_root, 'model.pkl') - with open(model_path, 'r') as input_file: + with open(model_path, 'rb') as input_file: self.model = pickle.load(input_file) def transform(self, my_team, their_team): @@ -16,7 +16,7 @@ def transform(self, my_team, their_team): X[hero_id - 1] = 1 for hero_id in their_team: X[hero_id - 1 + NUM_HEROES] = 1 - return X + return X.reshape(1, -1) def recommend(self, my_team, their_team, hero_candidates): '''Returns a list of (hero, probablility of winning with hero added) recommended to complete my_team.''' @@ -33,7 +33,12 @@ def recommend(self, my_team, their_team, hero_candidates): def score(self, query): '''Score the query using the model, considering both radiant and dire teams.''' radiant_query = query - dire_query = np.concatenate((radiant_query[NUM_HEROES:NUM_FEATURES], radiant_query[0:NUM_HEROES])) + + underlying_features = query[0] + dire_features = np.concatenate( + (underlying_features[NUM_HEROES:NUM_FEATURES], underlying_features[0:NUM_HEROES])) + dire_query = np.array([dire_features]) + rad_prob = self.model.predict_proba(radiant_query)[0][1] dire_prob = self.model.predict_proba(dire_query)[0][0] return (rad_prob + dire_prob) / 2 @@ -42,4 +47,3 @@ def predict(self, dream_team, their_team): '''Returns the probability of the dream_team winning against their_team.''' dream_team_query = self.transform(dream_team, their_team) return self.score(dream_team_query) - #return self.model.predict_proba(dream_team_query)[0][1] diff --git a/logistic_regression/model.pkl b/logistic_regression/model.pkl index 877b513..6cb9817 100644 Binary files a/logistic_regression/model.pkl and b/logistic_regression/model.pkl differ diff --git a/logistic_regression/preprocess.py b/logistic_regression/preprocess.py index b413b28..fc3cc6f 100644 --- 
a/logistic_regression/preprocess.py +++ b/logistic_regression/preprocess.py @@ -49,7 +49,7 @@ pbar.finish() -print "Permuting, generating train and test sets." +print("Permuting, generating train and test sets.") indices = np.random.permutation(NUM_MATCHES) test_indices = indices[0:NUM_MATCHES/10] train_indices = indices[NUM_MATCHES/10:NUM_MATCHES] @@ -60,7 +60,7 @@ X_train = X[train_indices] Y_train = Y[train_indices] -print "Saving output file now..." +print("Saving output file now...") np.savez_compressed('test_%d.npz' % len(test_indices), X=X_test, Y=Y_test) np.savez_compressed('train_%d.npz' % len(train_indices), X=X_train, Y=Y_train) diff --git a/logistic_regression/train.py b/logistic_regression/train.py index a081ddd..e7d4664 100644 --- a/logistic_regression/train.py +++ b/logistic_regression/train.py @@ -3,8 +3,8 @@ import numpy as np def train(X, Y, num_samples): - print 'Training using data from %d matches...' % num_samples - return LogisticRegression().fit(X[0:num_samples], Y[0:num_samples]) + print(f'Training using data from {num_samples} matches...') + return LogisticRegression(solver='saga').fit(X[0:num_samples], Y[0:num_samples]) def main(): # Import the preprocessed training X matrix and Y vector @@ -14,7 +14,7 @@ def main(): model = train(X_train, Y_train, len(X_train)) - with open('model.pkl', 'w') as output_file: + with open('model.pkl', 'wb') as output_file: pickle.dump(model, output_file) if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index cc96c1b..0000000 --- a/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -Flask==0.10.1 -dota2py==0.1.3 -numpy==1.10.1 -progressbar==2.3 -pymongo==2.6.3 -scikit-learn==0.14.1 -scipy==0.16.1 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..22427d3 --- /dev/null +++ b/setup.py @@ -0,0 +1,7 @@ +import setuptools + +setuptools.setup( + name='dotaml', + packages=setuptools.find_packages(exclude=['docs', 'static', 'templates', 'tests']), + 
python_requires='>=3.6', +) diff --git a/templates/index.html b/templates/index.html index e1c28ec..4140a54 100644 --- a/templates/index.html +++ b/templates/index.html @@ -24,7 +24,7 @@

DOTA2 Recommendation Engine

By Kevin Conley and Daniel Perry

-

Note: queries have about a 5 second delay between responses.

+

Note: the UI may take up to 5 seconds to update between queries.

Our team's heroes / Recommended heroes:

diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_logistic_regression.py b/tests/test_logistic_regression.py new file mode 100644 index 0000000..88d5397 --- /dev/null +++ b/tests/test_logistic_regression.py @@ -0,0 +1,9 @@ +from logistic_regression.f1score import calculate_precision_recall_fscore + + +def test_logistic_regression_precision_recall_fscore(): + precision, recall, f1 = calculate_precision_recall_fscore() + + assert precision == 0.7816169070781617 + assert recall == 0.6846899794299148 + assert f1 == 0.7299498746867168 diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..7980ca4 --- /dev/null +++ b/tox.ini @@ -0,0 +1,15 @@ +[tox] +minversion = 3.5.3 +envlist = + py36 + py37 +requires = + pipenv + + +[testenv] +whitelist_externals = + pipenv +commands = + pipenv install --dev + pytest tests/