Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
152 changes: 152 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
17 changes: 11 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,12 @@ alt="Shows lines with search results, the titles and the urls.">

- Uses `html2text` to strip the markup out of the page.
- Uses `beautifulsoup4` to parse the title.
- Supports both Google (default) and Bing search, but is coded in a modular / search engine agnostic
way to allow very easily add new search engine support. Bing search requires a API subscription key,
which can be obtained for free at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api
- Supports Google (default), Bing and Aquila search, and is coded in a modular, search-engine-agnostic
way so that support for new search engines can be added very easily. Both Bing and Aquila search require an API subscription key,
which can be obtained for free at https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api and https://aquila.network respectively.


Using the `googlesearch` module is very slow because it parses Google search webpages instead of querying cloud webservices. This is fine for playing with the model, but makes that searcher unusable for training or large scale inference purposes. In the paper, Bing cloud services are used, matching the results over Common Crawl instead of just downloading the page.
Using the `googlesearch` module is very slow because it parses Google search webpages instead of querying cloud webservices. This is fine for playing with the model, but makes that searcher unusable for training or large scale inference purposes. In the paper, Bing cloud services are used, matching the results over Common Crawl instead of just downloading the page. `Aquila Search` was added to support searching a limited collection of web pages, such as a set of bookmarks.

# Quick Start:

Expand Down Expand Up @@ -69,9 +69,9 @@ python search_server.py test_parser www.some_url_of_your_choice.com/
- requests_get_timeout - sets the timeout for URL requests to fetch content of URLs found during search. Defaults to 5 seconds.
- strip_html_menus - removes likely HTML menus to clean up text. This returns significantly higher quality and informationally dense text.
- max_text_bytes limits the bytes returned per web page. Defaults to no max. Note, ParlAI current defaults to only use the first 512 byte.
- search_engine set to "Google" default or "Bing". Note, the Bing Search engine was used in the Blenderbot2 paper to achieve their results. This implementation not only uses web pages but also news, entities and places.
- search_engine set to "Google" (default), "Bing" or "Aquila". Note, the Bing Search engine was used in the Blenderbot2 paper to achieve their results. This implementation not only uses web pages but also news, entities and places.
- use_description_only returns short descriptions but is 10X faster, since no URL fetches are performed (Bing only). It also has the advantage of being very concise, without the irrelevant HTML text normally returned.
- use_subscription_key required to use Bing only. Can get a free one at: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api
- use_subscription_key is required for Bing and Aquila. Get one for free — Bing: https://www.microsoft.com/en-us/bing/apis/bing-entity-search-api Aquila: https://aquila.network

# Advanced Examples

Expand All @@ -95,6 +95,11 @@ Bing Search Engine returning very relevant concise information 10X faster. Retur
python search_server.py serve --host 0.0.0.0:8080 --search_engine="Bing" --use_description_only --subscription_key "put your bing api subscription key here"
```

Aquila Custom Search Engine:
```bash
python search_server.py serve --host 0.0.0.0:8080 --search_engine="Aquila" --subscription_key "put your Aquila public key here"
```

# Additional Command Line Example Test Calls

```bash
Expand Down
41 changes: 40 additions & 1 deletion search_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,9 @@
# Bing Search API documentation:
# https://docs.microsoft.com/en-us/bing/search-apis/bing-web-search/reference/query-parameters

# Aquila Network website:
# https://aquila.network

def _parse_host(host: str) -> Tuple[str, int]:
""" Parse the host string.
Should be in the format HOSTNAME:PORT.
Expand All @@ -46,8 +49,12 @@ def _parse_host(host: str) -> Tuple[str, int]:
def _get_and_parse(url: str) -> Dict[str, str]:
""" Download a webpage and parse it. """

headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}

try:
resp = requests.get(url, timeout=_REQUESTS_GET_TIMEOUT)
resp = requests.get(url, headers=headers, timeout=_REQUESTS_GET_TIMEOUT)
except requests.exceptions.RequestException as e:
print(f"[!] {e} for url {url}")
return None
Expand Down Expand Up @@ -348,6 +355,30 @@ def search(self, q: str, n: int,

return googlesearch.search(q, num=n, stop=None, pause=_DELAY_SEARCH)

class AquilaSearchServer(SearchABCRequestHandler):
    """Request handler that queries the Aquila Network custom search API.

    Aquila searches a user-defined collection of web pages (e.g. a set of
    bookmarks) identified by a public index id, which is passed in through
    the generic ``subscription_key`` option.
    """

    def search(self, q: str, n: int,
               subscription_key: str = None,
               use_description_only: bool = False
               ) -> Generator[str, None, None]:
        """Return up to `n` result URLs for query `q`, best matches first.

        :param q: the search query string.
        :param n: maximum number of URLs to return.
        :param subscription_key: Aquila public index id (required).
        :param use_description_only: accepted for interface compatibility
            with the other search handlers; Aquila returns only URLs, so
            this flag is ignored here.
        :return: URLs sorted by descending relevance score. (A list is
            returned; the Generator annotation matches the sibling
            handlers' declared interface, and callers only iterate.)
        """
        payload = json.dumps({
            "publicIndexId": subscription_key,
            "query": q,
        })
        headers = {'Content-Type': 'application/json'}
        aquila_search_url = "https://x.aquila.network/api/search"
        urls = []

        try:
            # Bound the request so a stalled API call cannot hang the
            # server thread (mirrors the timeout used for page fetches
            # in _get_and_parse).
            response = requests.post(
                aquila_search_url,
                headers=headers,
                data=payload,
                timeout=_REQUESTS_GET_TIMEOUT,
            )
        except requests.exceptions.RequestException as e:
            # Best-effort, consistent with _get_and_parse: log and return
            # no results rather than crashing the request handler.
            print(f"[!] {e} for Aquila query {q}")
            return urls

        if response.status_code == 200:
            # 'result' maps url -> relevance score; sort by score so the
            # most relevant pages come first, then keep the top n URLs.
            scored = sorted(response.json()['result'].items(),
                            key=lambda item: item[1], reverse=True)
            urls = [url for url, _score in scored[:n]]

        return urls

class SearchABCServer(http.server.ThreadingHTTPServer):
def __init__(self,
server_address, RequestHandlerClass,
Expand Down Expand Up @@ -408,6 +439,8 @@ def serve(

# Select the request handler class for the configured search engine.
# The branches must be mutually exclusive (if/elif/else): with two
# separate `if` statements, the trailing `else` would overwrite the
# Bing handler with the Google one whenever search_engine == "Bing".
if search_engine == "Bing":
    request_handler = BingSearchRequestHandler
elif search_engine == "Aquila":
    request_handler = AquilaSearchServer
else:
    request_handler = GoogleSearchRequestHandler

Expand Down Expand Up @@ -438,6 +471,12 @@ def check_and_print_cmdline_args(
if subscription_key is not None:
print("Warning: subscription_key is not supported for Google Search Engine")
exit()
elif search_engine == "Aquila":
if subscription_key is None:
print("Warning: subscription_key is required for Aquila Search Engine")
print("To get one go to url:")
print("https://aquila.network")
exit()

print("Command line args used:")
print(f" requests_get_timeout={_REQUESTS_GET_TIMEOUT}")
Expand Down