33from nedrexapi .config import config as _config
44from langchain_community .llms .ollama import Ollama
55
6- _LLM_BASE = _config ["embeddings.server_base" ]
7- _LLM_model = _config [f"embeddings.model" ]
8- _LLM_path = _config [f"embeddings.path" ]
def get_conf_or_none(key):
    """Return the configuration value for *key*, or None when it is absent.

    Wraps ``_config.get`` so module-level settings can be loaded without
    crashing when an optional key is missing from the config.

    Parameters
    ----------
    key : str
        Dotted config key, e.g. ``"embeddings.model"``.

    Returns
    -------
    The configured value, or ``None`` when the lookup fails.
    """
    try:
        return _config.get(key)
    except Exception:
        # Narrowed from a bare `except:` — a bare except also swallows
        # SystemExit and KeyboardInterrupt, which must propagate.
        return None
# --- Embedding service settings (each is None when absent from config) ---
_LLM_BASE = get_conf_or_none("embeddings.server_base")
_LLM_model = get_conf_or_none("embeddings.model")
_LLM_path = get_conf_or_none("embeddings.path")

# --- Chat service settings ---
_LLM_chat_model = get_conf_or_none("chat.model")
_LLM_chat_base = get_conf_or_none("chat.server_base")
_LLM_chat_api_key = get_conf_or_none("chat.api_key")

# Auth header shared by all chat/generation clients below.
# NOTE(review): when chat.api_key is unset this produces "Bearer None" —
# confirm the server treats that as anonymous access.
headers = {"Authorization": f"Bearer {_LLM_chat_api_key}"}
2025def get_embedder ():
def get_generator():
    """Return an Ollama completion client built from the chat.* settings."""
    return Ollama(
        base_url=_LLM_chat_base,
        model=_LLM_chat_model,
        temperature=0.0,
        headers=headers,
    )
2530
def get_chat():
    """Return a ChatOllama chat client built from the chat.* settings.

    The auth header is forwarded via ``client_kwargs`` because ChatOllama
    passes extra HTTP options through to its underlying client.
    """
    # NOTE(review): ChatOllama is not among the imports visible in this
    # chunk — confirm it is imported elsewhere in the file.
    client_opts = {'headers': headers}
    return ChatOllama(
        base_url=_LLM_chat_base,
        model=_LLM_chat_model,
        temperature=0.0,
        client_kwargs=client_opts,
    )
2933
def get_embedding(query):
    """Embed *query* with the configured embedder and return the result."""
    return get_embedder().embed(query)
3337
34-
35-
3638def generate (query ):
3739 ollama_llm = get_generator ()
3840 response = ollama_llm .invoke (query )
0 commit comments