From 6ea25f6d98acf8e0d01ffbd032bf7800fb07b6be Mon Sep 17 00:00:00 2001 From: freakeinstein Date: Fri, 31 Dec 2021 10:57:15 +0530 Subject: [PATCH 1/4] added aquila search engine --- .gitignore | 152 +++++++++++++++++++++++++++++++++++++++++++++++ search_server.py | 41 ++++++++++++- 2 files changed, 192 insertions(+), 1 deletion(-) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d9005f2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,152 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/search_server.py b/search_server.py index 91a1a6d..85c1470 100644 --- a/search_server.py +++ b/search_server.py @@ -33,6 +33,9 @@ # Bing Search API documentation: # https://docs.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters +# Aquila Network website: +# https://aquila.network + def _parse_host(host: str) -> Tuple[str, int]: """ Parse the host string. Should be in the format HOSTNAME:PORT. 
@@ -46,8 +49,12 @@ def _parse_host(host: str) -> Tuple[str, int]:
 
 def _get_and_parse(url: str) -> Dict[str, str]:
     """ Download a webpage and parse it. """
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
+    }
+
     try:
-        resp = requests.get(url, timeout=_REQUESTS_GET_TIMEOUT)
+        resp = requests.get(url, headers=headers, timeout=_REQUESTS_GET_TIMEOUT)
     except requests.exceptions.RequestException as e:
         print(f"[!] {e} for url {url}")
         return None
@@ -348,6 +355,30 @@ def search(self, q: str, n: int,
 
         return googlesearch.search(q, num=n, stop=None, pause=_DELAY_SEARCH)
 
+class AquilaSearchRequestHandler(SearchABCRequestHandler):
+    def search(self, q: str, n: int,
+               subscription_key: str = None,
+               use_description_only: bool = False
+               ) -> Generator[str, None, None]:
+
+        payload = json.dumps({
+            "publicIndexId": subscription_key,
+            "query": q
+        })
+        headers = {
+            'Content-Type': 'application/json'
+        }
+
+        aquila_url_ev = "https://x.aquila.network/api/search"
+        ret_ = []
+
+        response = requests.request("POST", aquila_url_ev, headers=headers, data=payload)
+        if response.status_code == 200:
+            resp_ = dict(sorted(response.json()['result'].items(), key=lambda item: item[1], reverse=True))
+            ret_ = list(resp_.keys())[:n]
+
+        return ret_
+
 class SearchABCServer(http.server.ThreadingHTTPServer):
 
     def __init__(self, server_address, RequestHandlerClass,
@@ -408,6 +439,8 @@ def serve(
 
     if search_engine == "Bing":
         request_handler = BingSearchRequestHandler
+    elif search_engine == "Aquila":
+        request_handler = AquilaSearchRequestHandler
     else:
         request_handler = GoogleSearchRequestHandler
 
@@ -438,6 +471,12 @@ def check_and_print_cmdline_args(
     if subscription_key is not None:
         print("Warning: subscription_key is not supported for Google Search Engine")
         exit()
+    elif search_engine == "Aquila":
+        if subscription_key is None:
+            print("Warning: subscription_key is required for Aquila Search Engine")
+            print("To get 
one go to url:") + print("https://aquila.network") + exit() print("Command line args used:") print(f" requests_get_timeout={_REQUESTS_GET_TIMEOUT}") From d893ff4a5b50df29e6d3fbf3cbf9d3d67f1e1e3d Mon Sep 17 00:00:00 2001 From: jubin jose Date: Fri, 31 Dec 2021 11:09:44 +0530 Subject: [PATCH 2/4] Update README.md --- README.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 70c380c..bfb2249 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ way to allow very easily add new search engine support. Bing search requires a A which can be obtained for free at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api -Using the `googlesearch` module is very slow because it parses Google search webpages instead of querying cloud webservices. This is fine for playing with the model, but makes that searcher unusable for training or large scale inference purposes. In the paper, Bing cloud services are used, matching the results over Common Crawl instead of just downloading the page. +Using the `googlesearch` module is very slow because it parses Google search webpages instead of querying cloud webservices. This is fine for playing with the model, but makes that searcher unusable for training or large scale inference purposes. In the paper, Bing cloud services are used, matching the results over Common Crawl instead of just downloading the page. Added `Aquila Search` to support searching on a limited collection of web pages like a collection of bookmarks. # Quick Start: @@ -69,9 +69,9 @@ python search_server.py test_parser www.some_url_of_your_choice.com/ - requests_get_timeout - sets the timeout for URL requests to fetch content of URLs found during search. Defaults to 5 seconds. - strip_html_menus - removes likely HTML menus to clean up text. This returns significantly higher quality and informationally dense text. - max_text_bytes limits the bytes returned per web page. Defaults to no max. 
Note, ParlAI current defaults to only use the first 512 byte. -- search_engine set to "Google" default or "Bing". Note, the Bing Search engine was used in the Blenderbot2 paper to achieve their results. This implementation not only uses web pages but also news, entities and places. +- search_engine set to "Google" default, "Bing" or "Aquila". Note, the Bing Search engine was used in the Blenderbot2 paper to achieve their results. This implementation not only uses web pages but also news, entities and places. - use_description_only are short but 10X faster since no url gets for Bing only. It also has the advantage of being very concise without an HTML irrelevant text normally returned. -- use_subscription_key required to use Bing only. Can get a free one at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api +- use_subscription_key required to use with Bing and Aquila. Get it for free: Bing: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api Aquila: https://aquila.network # Advanced Examples @@ -95,6 +95,11 @@ Bing Search Engine returning very relevant concise information 10X faster. Retur python search_server.py serve --host 0.0.0.0:8080 --search_engine="Bing" --use_description_only --subscription_key "put your bing api subscription key here" ``` +Aquila Custom Search Engine: +```bash +python search_server.py serve --host 0.0.0.0:8080 --search_engine="Aquila" --subscription_key "put your Aquila public key here" +``` + # Additional Command Line Example Test Calls ```bash From ee84cbe45692b45f71af7355ccb8fbcde3133224 Mon Sep 17 00:00:00 2001 From: jubin jose Date: Fri, 31 Dec 2021 11:10:29 +0530 Subject: [PATCH 3/4] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bfb2249..15a5601 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ alt="Shows lines with search results, the titles and the urls."> - Uses `html2text` to strip the markup out of the page. 
 - Uses `beautifulsoup4` to parse the title.
-- Supports both Google (default) and Bing search, but is coded in a modular / search engine agnostic
+- Supports Google (default), Bing and Aquila search, but is coded in a modular / search engine agnostic
 way to allow very easily add new search engine support. Bing search requires a API subscription key,
 which can be obtained for free at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api

From 6ec54348de501517534c10ddc2768780001b0bd1 Mon Sep 17 00:00:00 2001
From: jubin jose
Date: Fri, 31 Dec 2021 11:11:20 +0530
Subject: [PATCH 4/4] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 15a5601..5510c4a 100644
--- a/README.md
+++ b/README.md
@@ -14,8 +14,8 @@ alt="Shows lines with search results, the titles and the urls.">
 - Uses `html2text` to strip the markup out of the page.
 - Uses `beautifulsoup4` to parse the title.
 - Supports Google (default), Bing and Aquila search, but is coded in a modular / search engine agnostic
-way to allow very easily add new search engine support. Bing search requires a API subscription key,
-which can be obtained for free at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api
+way to allow very easily add new search engine support. Both Bing and Aquila search require an API subscription key,
+which can be obtained for free at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api & https://aquila.network
 
 Using the `googlesearch` module is very slow because it parses Google search webpages instead of querying cloud webservices. This is fine for playing with the model, but makes that searcher unusable for training or large scale inference purposes. In the paper, Bing cloud services are used, matching the results over Common Crawl instead of just downloading the page. Added `Aquila Search` to support searching on a limited collection of web pages like a collection of bookmarks.