From 8fa0cc5699590152149ffd464e16cbfe47006363 Mon Sep 17 00:00:00 2001 From: matano Date: Tue, 20 Jan 2026 15:40:13 +0200 Subject: [PATCH 01/43] support a custom openai api base --- docker-compose.yml | 4 +++- src/api/provider_validation.py | 17 +++++++++++++---- src/services/models_service.py | 6 +++++- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 305745061..c88b59bac 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -67,6 +67,7 @@ services: - OPENSEARCH_USERNAME=admin - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - OPENAI_API_KEY=${OPENAI_API_KEY} + - OPENAI_API_BASE=${OPENAI_API_BASE:-None} - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} - WATSONX_API_KEY=${WATSONX_API_KEY} - WATSONX_ENDPOINT=${WATSONX_ENDPOINT} @@ -114,6 +115,7 @@ services: - LANGFUSE_PUBLIC_KEY=${LANGFUSE_PUBLIC_KEY:-} - LANGFUSE_HOST=${LANGFUSE_HOST:-} - OPENAI_API_KEY=${OPENAI_API_KEY:-None} + - OPENAI_API_BASE=${OPENAI_API_BASE:-None} - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-None} - WATSONX_API_KEY=${WATSONX_API_KEY:-None} - WATSONX_ENDPOINT=${WATSONX_ENDPOINT:-None} @@ -133,7 +135,7 @@ services: - MIMETYPE=None - FILESIZE=0 - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-} - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,OPENAI_API_BASE,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index 813826a18..2f587fb49 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -1,6 +1,8 @@ """Provider validation utilities for testing API keys and models during onboarding.""" import json +import os + import httpx from utils.container_utils import transform_localhost_url from utils.logging_config import get_logger @@ -107,6 +109,9 @@ def _extract_error_details(response: httpx.Response) -> str: return parsed return response_text +def get_openai_url(endpoint: str) -> str: + api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") + return f"{api_base}{endpoint}" async def validate_provider_setup( provider: str, @@ -248,10 +253,11 @@ async def _test_openai_lightweight_health(api_key: str) -> None: "Content-Type": "application/json", } + url = get_openai_url(endpoint="/v1/models") async with httpx.AsyncClient() as client: # Use /v1/models endpoint which validates the key without consuming credits response = await client.get( - "https://api.openai.com/v1/models", + url=url, headers=headers, timeout=10.0, # Short timeout for lightweight check ) @@ -309,8 +315,9 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No async with httpx.AsyncClient() as client: # Try with max_tokens first payload = {**base_payload, "max_tokens": 50} + url = get_openai_url(endpoint="/v1/chat/completions") response = await client.post( - "https://api.openai.com/v1/chat/completions", + url=url, headers=headers, json=payload, timeout=30.0, @@ -320,8 +327,9 
@@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No if response.status_code != 200: logger.info("max_tokens parameter failed, trying max_completion_tokens instead") payload = {**base_payload, "max_completion_tokens": 50} + url = get_openai_url(endpoint="/v1/chat/completions") response = await client.post( - "https://api.openai.com/v1/chat/completions", + url=url, headers=headers, json=payload, timeout=30.0, @@ -356,8 +364,9 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None: } async with httpx.AsyncClient() as client: + url = get_openai_url(endpoint="/v1/embeddings") response = await client.post( - "https://api.openai.com/v1/embeddings", + url=url, headers=headers, json=payload, timeout=30.0, diff --git a/src/services/models_service.py b/src/services/models_service.py index cd08b7085..318dfecea 100644 --- a/src/services/models_service.py +++ b/src/services/models_service.py @@ -1,3 +1,5 @@ +import os + import httpx from typing import Dict, List from utils.container_utils import transform_localhost_url @@ -54,8 +56,10 @@ async def get_openai_models(self, api_key: str) -> Dict[str, List[Dict[str, str] async with httpx.AsyncClient() as client: # Lightweight validation: just check if API key is valid # This doesn't consume credits, only validates the key + from api.provider_validation import get_openai_url + url = get_openai_url(endpoint="/v1/models") response = await client.get( - "https://api.openai.com/v1/models", headers=headers, timeout=10.0 + url, headers=headers, timeout=10.0 ) if response.status_code == 200: From cc2187b3eccac473837f10f182b1790f145c2f7a Mon Sep 17 00:00:00 2001 From: matano Date: Thu, 29 Jan 2026 13:17:30 +0200 Subject: [PATCH 02/43] fix merge --- docker-compose.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 8a5e8d41e..f28e682a6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -139,11 +139,7 @@ services: - MIMETYPE=None - FILESIZE=0 - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-} -<<<<<<< HEAD - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,OPENAI_API_BASE,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL -======= - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL ->>>>>>> remotes/upstream/main + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,OPENAI_API_BASE,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL>>>>>>> remotes/upstream/main - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - 
LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} From 1794b0df13deb8c6002c17f9f39e084a9a1260c4 Mon Sep 17 00:00:00 2001 From: matano Date: Thu, 29 Jan 2026 14:43:03 +0200 Subject: [PATCH 03/43] further changes to support custom endpoints --- src/api/models.py | 18 ++++++++++++- src/api/provider_health.py | 2 +- src/api/provider_validation.py | 49 +++++++++++++++++----------------- src/services/models_service.py | 6 ++--- 4 files changed, 46 insertions(+), 29 deletions(-) diff --git a/src/api/models.py b/src/api/models.py index 118126029..961e1d8c9 100644 --- a/src/api/models.py +++ b/src/api/models.py @@ -1,3 +1,5 @@ +import os + from starlette.responses import JSONResponse from utils.logging_config import get_logger from config.settings import get_openrag_config @@ -10,9 +12,11 @@ async def get_openai_models(request, models_service, session_manager): try: # Get API key from request body api_key = None + api_base = None try: body = await request.json() api_key = body.get("api_key") if body else None + api_base = body.get("api_base") if body else None except Exception: # Body might be empty or invalid JSON, continue to fallback pass @@ -36,7 +40,19 @@ async def get_openai_models(request, models_service, session_manager): status_code=400, ) - models = await models_service.get_openai_models(api_key=api_key) + if not api_base: + try: + config = get_openrag_config() + api_base = config.providers.openai.endpoint + logger.info( + f"Retrieved OpenAI API base from config: {'yes' if api_base else 'no'}" + ) + except Exception as e: + logger.error(f"Failed to get config: {e}") + if not api_base: + api_base = os.environ.get("OPENAI_BASE_API", "https://api.openai.com") + + models = await models_service.get_openai_models(api_key=api_key, api_base=api_base) return JSONResponse(models) except Exception as e: logger.error(f"Failed to get OpenAI models: {str(e)}") diff --git a/src/api/provider_health.py b/src/api/provider_health.py index 3802d8dcb..36238e279 100644 --- a/src/api/provider_health.py +++ b/src/api/provider_health.py @@ -42,7 +42,7 @@ async def check_provider_health(request): provider = current_config.agent.llm_provider # Validate provider name - valid_providers = ["openai", "ollama", "watsonx", "anthropic"] + valid_providers = ["openai_ete", "openai", "ollama", "watsonx", "anthropic"] if provider not in valid_providers: return JSONResponse( { diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index 2f587fb49..358264998 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -109,10 +109,6 @@ def _extract_error_details(response: httpx.Response) -> str: return parsed return response_text -def get_openai_url(endpoint: str) -> str: - api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") - return f"{api_base}{endpoint}" - async def validate_provider_setup( provider: str, api_key: str = None, @@ -130,7 +126,7 @@ async def validate_provider_setup( api_key: API key for the provider (optional for ollama) embedding_model: Embedding model to test llm_model: LLM model to test - endpoint: Provider endpoint (required for ollama and watsonx) + endpoint: Provider endpoint (required for ollama and watsonx, optional for openai) project_id: Project ID (required for watsonx) test_completion: If True, performs full validation with completion/embedding tests (consumes credits). If False, performs lightweight validation (no credits consumed). Default: False. 
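For reference, a minimal usage sketch of the validation entry point after this change; the endpoint and key below are placeholder values for a hypothetical OpenAI-compatible gateway, not values taken from this series, and the call raises on validation failure:

import asyncio

from api.provider_validation import validate_provider_setup


async def check() -> None:
    # Lightweight key check (test_completion=False consumes no credits) against
    # a custom OpenAI-compatible base instead of https://api.openai.com.
    await validate_provider_setup(
        provider="openai",
        api_key="sk-placeholder",  # hypothetical key, for illustration only
        endpoint="https://my-openai-proxy.example.com",  # hypothetical gateway URL
        test_completion=False,
    )


asyncio.run(check())

Leaving endpoint unset falls back to the environment default, as the next hunk shows.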
@@ -143,11 +139,14 @@ async def validate_provider_setup( try: logger.info(f"Starting validation for provider: {provider_lower} (test_completion={test_completion})") + if provider == "openai" and not endpoint: + endpoint = os.environ.get("OPENAI_BASE_API", "https://api.openai.com") + if test_completion: # Full validation with completion/embedding tests (consumes credits) if embedding_model: # Test embedding - await test_embedding( + await _test_embedding( provider=provider_lower, api_key=api_key, embedding_model=embedding_model, @@ -156,7 +155,7 @@ async def validate_provider_setup( ) elif llm_model: # Test completion with tool calling - await test_completion_with_tools( + await _test_completion_with_tools( provider=provider_lower, api_key=api_key, llm_model=llm_model, @@ -165,7 +164,7 @@ async def validate_provider_setup( ) else: # Lightweight validation (no credits consumed) - await test_lightweight_health( + await _test_lightweight_health( provider=provider_lower, api_key=api_key, endpoint=endpoint, @@ -180,7 +179,7 @@ async def validate_provider_setup( raise -async def test_lightweight_health( +async def _test_lightweight_health( provider: str, api_key: str = None, endpoint: str = None, @@ -188,8 +187,8 @@ async def test_lightweight_health( ) -> None: """Test provider health with lightweight check (no credits consumed).""" - if provider == "openai": - await _test_openai_lightweight_health(api_key) + if provider.startswith("openai"): + await _test_openai_lightweight_health(api_key, endpoint) elif provider == "watsonx": await _test_watsonx_lightweight_health(api_key, endpoint, project_id) elif provider == "ollama": @@ -200,7 +199,7 @@ async def test_lightweight_health( raise ValueError(f"Unknown provider: {provider}") -async def test_completion_with_tools( +async def _test_completion_with_tools( provider: str, api_key: str = None, llm_model: str = None, @@ -209,8 +208,8 @@ async def test_completion_with_tools( ) -> None: """Test completion with tool calling for the provider.""" - if provider == "openai": - await _test_openai_completion_with_tools(api_key, llm_model) + if provider.startswith("openai"): + await _test_openai_completion_with_tools(api_key, llm_model, endpoint) elif provider == "watsonx": await _test_watsonx_completion_with_tools(api_key, llm_model, endpoint, project_id) elif provider == "ollama": @@ -221,7 +220,7 @@ async def test_completion_with_tools( raise ValueError(f"Unknown provider: {provider}") -async def test_embedding( +async def _test_embedding( provider: str, api_key: str = None, embedding_model: str = None, @@ -230,8 +229,8 @@ async def test_embedding( ) -> None: """Test embedding generation for the provider.""" - if provider == "openai": - await _test_openai_embedding(api_key, embedding_model) + if provider.startswith("openai"): + await _test_openai_embedding(api_key, embedding_model, endpoint) elif provider == "watsonx": await _test_watsonx_embedding(api_key, embedding_model, endpoint, project_id) elif provider == "ollama": @@ -241,7 +240,7 @@ async def test_embedding( # OpenAI validation functions -async def _test_openai_lightweight_health(api_key: str) -> None: +async def _test_openai_lightweight_health(api_key: str, endpoint: str) -> None: """Test OpenAI API key validity with lightweight check. Only checks if the API key is valid without consuming credits. 
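One caveat worth noting for the URL construction in the hunks that follow: request URLs are built with a plain f-string such as f"{endpoint}/v1/models", which assumes the configured base has no trailing slash and does not already end in /v1. A hypothetical normalization helper, not part of this patch, would make the join tolerant of both spellings:

def join_openai_url(api_base: str, path: str) -> str:
    # Collapse a trailing slash so "https://host/" and "https://host" build the same URL.
    return f"{api_base.rstrip('/')}{path}"


assert join_openai_url("https://api.openai.com", "/v1/models") == "https://api.openai.com/v1/models"
assert join_openai_url("https://api.openai.com/", "/v1/models") == "https://api.openai.com/v1/models"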
@@ -253,7 +252,8 @@ async def _test_openai_lightweight_health(api_key: str) -> None: "Content-Type": "application/json", } - url = get_openai_url(endpoint="/v1/models") + url = f"{endpoint}/v1/models" + logger.debug("Testing openai lightweight health", url=url) async with httpx.AsyncClient() as client: # Use /v1/models endpoint which validates the key without consuming credits response = await client.get( @@ -277,7 +277,7 @@ async def _test_openai_lightweight_health(api_key: str) -> None: raise -async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> None: +async def _test_openai_completion_with_tools(api_key: str, llm_model: str, endpoint: str) -> None: """Test OpenAI completion with tool calling.""" try: headers = { @@ -315,7 +315,8 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No async with httpx.AsyncClient() as client: # Try with max_tokens first payload = {**base_payload, "max_tokens": 50} - url = get_openai_url(endpoint="/v1/chat/completions") + url = f"{endpoint}/v1/chat/completions" + logger.debug("Test openai completion tools", url=url) response = await client.post( url=url, headers=headers, @@ -327,7 +328,7 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No if response.status_code != 200: logger.info("max_tokens parameter failed, trying max_completion_tokens instead") payload = {**base_payload, "max_completion_tokens": 50} - url = get_openai_url(endpoint="/v1/chat/completions") + logger.debug("Test openai completion tools", url=url) response = await client.post( url=url, headers=headers, @@ -350,7 +351,7 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No raise -async def _test_openai_embedding(api_key: str, embedding_model: str) -> None: +async def _test_openai_embedding(api_key: str, embedding_model: str, endpoint: str) -> None: """Test OpenAI embedding generation.""" try: headers = { @@ -364,7 +365,7 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None: } async with httpx.AsyncClient() as client: - url = get_openai_url(endpoint="/v1/embeddings") + url = f"{endpoint}/v1/embeddings" response = await client.post( url=url, headers=headers, diff --git a/src/services/models_service.py b/src/services/models_service.py index 318dfecea..61fc574f1 100644 --- a/src/services/models_service.py +++ b/src/services/models_service.py @@ -45,7 +45,7 @@ class ModelsService: def __init__(self): self.session_manager = None - async def get_openai_models(self, api_key: str) -> Dict[str, List[Dict[str, str]]]: + async def get_openai_models(self, api_key: str, api_base: str) -> Dict[str, List[Dict[str, str]]]: """Fetch available models from OpenAI API with lightweight validation""" try: headers = { @@ -56,8 +56,8 @@ async def get_openai_models(self, api_key: str) -> Dict[str, List[Dict[str, str] async with httpx.AsyncClient() as client: # Lightweight validation: just check if API key is valid # This doesn't consume credits, only validates the key - from api.provider_validation import get_openai_url - url = get_openai_url(endpoint="/v1/models") + url = f"{api_base}/v1/models" + logger.debug("Getting openai models.", url=url) response = await client.get( url, headers=headers, timeout=10.0 ) From 16a1ad4a4c17cd10d99453afaf76f007fd111974 Mon Sep 17 00:00:00 2001 From: matano Date: Thu, 29 Jan 2026 15:14:28 +0200 Subject: [PATCH 04/43] add missing field --- src/config/config_manager.py | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/src/config/config_manager.py b/src/config/config_manager.py index 6af07db4d..46caad0c2 100644 --- a/src/config/config_manager.py +++ b/src/config/config_manager.py @@ -14,6 +14,7 @@ class OpenAIConfig: """OpenAI provider configuration.""" api_key: str = "" + endpoint: str = "" configured: bool = False @@ -223,6 +224,8 @@ def _load_env_overrides( # OpenAI provider settings if os.getenv("OPENAI_API_KEY"): config_data["providers"]["openai"]["api_key"] = os.getenv("OPENAI_API_KEY") + if os.getenv("OPENAI_API_BASE"): + config_data["providers"]["openai"]["endpoint"] = os.getenv("OPENAI_API_BASE") # Anthropic provider settings if os.getenv("ANTHROPIC_API_KEY"): From c72358e8facd4f2d20cf0dbfada842b63d19ce47 Mon Sep 17 00:00:00 2001 From: matano Date: Thu, 29 Jan 2026 15:14:41 +0200 Subject: [PATCH 05/43] fix env var name --- src/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/models.py b/src/api/models.py index 961e1d8c9..bc43d1bb2 100644 --- a/src/api/models.py +++ b/src/api/models.py @@ -50,7 +50,7 @@ async def get_openai_models(request, models_service, session_manager): except Exception as e: logger.error(f"Failed to get config: {e}") if not api_base: - api_base = os.environ.get("OPENAI_BASE_API", "https://api.openai.com") + api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") models = await models_service.get_openai_models(api_key=api_key, api_base=api_base) return JSONResponse(models) From e200f6b38e60e26d0f0c83ebb71160b83134c80a Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 1 Feb 2026 10:05:12 +0200 Subject: [PATCH 06/43] openai providers startwith 'openai' --- src/api/provider_validation.py | 10 +++++----- src/config/config_manager.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index 358264998..ed051ad88 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -139,8 +139,8 @@ async def validate_provider_setup( try: logger.info(f"Starting validation for provider: {provider_lower} (test_completion={test_completion})") - if provider == "openai" and not endpoint: - endpoint = os.environ.get("OPENAI_BASE_API", "https://api.openai.com") + if provider.startswith("openai") and not endpoint: + endpoint = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") if test_completion: # Full validation with completion/embedding tests (consumes credits) @@ -253,7 +253,7 @@ async def _test_openai_lightweight_health(api_key: str, endpoint: str) -> None: } url = f"{endpoint}/v1/models" - logger.debug("Testing openai lightweight health", url=url) + logger.info("Testing openai lightweight health", url=url) async with httpx.AsyncClient() as client: # Use /v1/models endpoint which validates the key without consuming credits response = await client.get( @@ -316,7 +316,7 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str, endpo # Try with max_tokens first payload = {**base_payload, "max_tokens": 50} url = f"{endpoint}/v1/chat/completions" - logger.debug("Test openai completion tools", url=url) + logger.info("Test openai completion tools", url=url) response = await client.post( url=url, headers=headers, @@ -328,7 +328,7 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str, endpo if response.status_code != 200: logger.info("max_tokens parameter failed, trying max_completion_tokens instead") payload = {**base_payload, "max_completion_tokens": 50} - logger.debug("Test openai completion 
tools", url=url) + logger.info("Test openai completion tools", url=url) response = await client.post( url=url, headers=headers, diff --git a/src/config/config_manager.py b/src/config/config_manager.py index 46caad0c2..902552f07 100644 --- a/src/config/config_manager.py +++ b/src/config/config_manager.py @@ -52,7 +52,7 @@ class ProvidersConfig: def get_provider_config(self, provider: str): """Get configuration for a specific provider.""" provider_lower = provider.lower() - if provider_lower == "openai": + if provider_lower.startswith("openai"): return self.openai elif provider_lower == "anthropic": return self.anthropic From 830271f195bec462fbdffe57d6a052f2be555abd Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 1 Feb 2026 10:35:04 +0200 Subject: [PATCH 07/43] revert to a single openai provider --- src/api/provider_validation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index ed051ad88..458190193 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -139,7 +139,7 @@ async def validate_provider_setup( try: logger.info(f"Starting validation for provider: {provider_lower} (test_completion={test_completion})") - if provider.startswith("openai") and not endpoint: + if provider == "openai" and not endpoint: endpoint = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") if test_completion: @@ -187,7 +187,7 @@ async def _test_lightweight_health( ) -> None: """Test provider health with lightweight check (no credits consumed).""" - if provider.startswith("openai"): + if provider == "openai": await _test_openai_lightweight_health(api_key, endpoint) elif provider == "watsonx": await _test_watsonx_lightweight_health(api_key, endpoint, project_id) @@ -208,7 +208,7 @@ async def _test_completion_with_tools( ) -> None: """Test completion with tool calling for the provider.""" - if provider.startswith("openai"): + if provider == "openai": await _test_openai_completion_with_tools(api_key, llm_model, endpoint) elif provider == "watsonx": await _test_watsonx_completion_with_tools(api_key, llm_model, endpoint, project_id) @@ -229,7 +229,7 @@ async def _test_embedding( ) -> None: """Test embedding generation for the provider.""" - if provider.startswith("openai"): + if provider == "openai": await _test_openai_embedding(api_key, embedding_model, endpoint) elif provider == "watsonx": await _test_watsonx_embedding(api_key, embedding_model, endpoint, project_id) From fe5562220062753279bdc4f2054332fe10f7a709 Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 1 Feb 2026 10:36:00 +0200 Subject: [PATCH 08/43] revert to a single openai provider --- src/api/provider_health.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/provider_health.py b/src/api/provider_health.py index 36238e279..3802d8dcb 100644 --- a/src/api/provider_health.py +++ b/src/api/provider_health.py @@ -42,7 +42,7 @@ async def check_provider_health(request): provider = current_config.agent.llm_provider # Validate provider name - valid_providers = ["openai_ete", "openai", "ollama", "watsonx", "anthropic"] + valid_providers = ["openai", "ollama", "watsonx", "anthropic"] if provider not in valid_providers: return JSONResponse( { From 7f6ebe6992a7027b0bddbb967afd40d1a73c1874 Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 1 Feb 2026 10:39:17 +0200 Subject: [PATCH 09/43] revert to a single openai provider --- src/config/config_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/config/config_manager.py b/src/config/config_manager.py index 902552f07..46caad0c2 100644 --- a/src/config/config_manager.py +++ b/src/config/config_manager.py @@ -52,7 +52,7 @@ class ProvidersConfig: def get_provider_config(self, provider: str): """Get configuration for a specific provider.""" provider_lower = provider.lower() - if provider_lower.startswith("openai"): + if provider_lower == "openai": return self.openai elif provider_lower == "anthropic": return self.anthropic From 787e5da6c15eadb66c1d8fb0364c59bd5277c1ed Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 1 Feb 2026 10:42:44 +0200 Subject: [PATCH 10/43] fix LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index f28e682a6..d5466c8ff 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -139,7 +139,7 @@ services: - MIMETYPE=None - FILESIZE=0 - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-} - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,OPENAI_API_BASE,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL>>>>>>> remotes/upstream/main + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,OPENAI_API_BASE,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} From 371029f88e14ef161902b5d9f100a1e33111e596 Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 1 Feb 2026 10:47:55 +0200 Subject: [PATCH 11/43] throw if api_base is not in request or in configuration (as done for the api_key) --- src/api/models.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/api/models.py b/src/api/models.py index bc43d1bb2..8d4ceb281 100644 --- a/src/api/models.py +++ b/src/api/models.py @@ -50,7 +50,12 @@ async def get_openai_models(request, models_service, session_manager): except Exception as e: logger.error(f"Failed to get config: {e}") if not api_base: - api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") + return JSONResponse( + { + "error": "OpenAI API base is required either in request body or in configuration" + }, + status_code=400, + ) models = await models_service.get_openai_models(api_key=api_key, api_base=api_base) return JSONResponse(models) From 4241f31f2c9d449e815e41be0e15134a4dbbfe8d Mon Sep 17 00:00:00 2001 From: matano Date: Sun, 8 Feb 2026 17:35:52 +0200 Subject: [PATCH 12/43] add OPENAI_API_BASE to add_provider_credentials_to_headers() and build_mcp_global_vars_from_config() --- src/utils/langflow_headers.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/utils/langflow_headers.py b/src/utils/langflow_headers.py index e3447e611..bfc5f1271 100644 --- 
a/src/utils/langflow_headers.py +++ b/src/utils/langflow_headers.py @@ -14,7 +14,10 @@ def add_provider_credentials_to_headers(headers: Dict[str, str], config) -> None # Add OpenAI credentials if config.providers.openai.api_key: headers["X-LANGFLOW-GLOBAL-VAR-OPENAI_API_KEY"] = str(config.providers.openai.api_key) - + + if config.providers.openai.endpoint: + headers["X-LANGFLOW-GLOBAL-VAR-OPENAI_API_BASE"] = str(config.providers.openai.endpoint) + # Add Anthropic credentials if config.providers.anthropic.api_key: headers["X-LANGFLOW-GLOBAL-VAR-ANTHROPIC_API_KEY"] = str(config.providers.anthropic.api_key) @@ -47,6 +50,9 @@ def build_mcp_global_vars_from_config(config) -> Dict[str, str]: if config.providers.openai.api_key: global_vars["OPENAI_API_KEY"] = config.providers.openai.api_key + if config.providers.openai.endpoint: + global_vars["OPENAI_API_BASE"] = config.providers.openai.endpoint + # Add Anthropic credentials if config.providers.anthropic.api_key: global_vars["ANTHROPIC_API_KEY"] = config.providers.anthropic.api_key From 6d2e809f2d17cc0d4f03336a1e0a326e1648a61c Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Feb 2026 12:51:22 +0200 Subject: [PATCH 13/43] add OPENAI_API_BASE as default for the api_base field of the OPENAI embedder --- flows/ingestion_flow.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index 6f65c04e0..78a4ddad9 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -5317,7 +5317,7 @@ "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": true, + "advanced": false, "display_name": "OpenAI API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", @@ -5326,7 +5326,7 @@ ], "list": false, "list_add_label": "Add More", - "load_from_db": false, + "load_from_db": true, "name": "api_base", "override_skip": false, "placeholder": "", @@ -5338,7 +5338,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "OPENAI_API_BASE" }, "api_key": { "_input_type": "SecretStrInput", From 73ab1b3a735c78eb375024fbfa4fdce9b1bb29ff Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Feb 2026 14:07:50 +0200 Subject: [PATCH 14/43] add OPENAI_API_BASE to embedder and agent --- flows/openrag_agent.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index f04405ebe..d8541c598 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -1808,14 +1808,14 @@ }, "openai_api_base": { "_input_type": "StrInput", - "advanced": true, + "advanced": false, "display_name": "OpenAI API Base", "dynamic": false, "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.", "input_types": [], "list": false, "list_add_label": "Add More", - "load_from_db": false, + "load_from_db": true, "name": "openai_api_base", "override_skip": false, "placeholder": "", @@ -1826,7 +1826,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "OPENAI_API_BASE" }, "output_schema": { "_input_type": "TableInput", @@ -2344,7 +2344,7 @@ ], "list": false, "list_add_label": "Add More", - "load_from_db": false, + "load_from_db": true, "name": "api_base", "override_skip": false, "placeholder": "", @@ -2356,7 +2356,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "OPENAI_API_BASE" }, "api_key": { "_input_type": "SecretStrInput", From 6624c4c2440854523ad05c2576535d11801d4467 Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Feb 2026 14:09:18 +0200 Subject: [PATCH 15/43] when updating an openai api_base, use the OPENAI_API_BASE --- src/services/flows_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/services/flows_service.py b/src/services/flows_service.py index e97ac2d3a..2d147353d 100644 --- a/src/services/flows_service.py +++ b/src/services/flows_service.py @@ -1393,8 +1393,8 @@ async def _update_component_fields( template["api_key"]["advanced"] = False updated = True if provider == "openai" and "api_base" in template: - template["api_base"]["value"] = "" - template["api_base"]["load_from_db"] = False + template["api_base"]["value"] = "OPENAI_API_BASE" + template["api_base"]["load_from_db"] = True template["api_base"]["show"] = True template["api_base"]["advanced"] = False updated = True From 9fbb7cc206e1b811f9c8055fd8da43578e9a72ae Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Feb 2026 14:15:17 +0200 Subject: [PATCH 16/43] add claude-opus to OpenAI allowed LLMs --- src/services/models_service.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/services/models_service.py b/src/services/models_service.py index 61fc574f1..d99e91e33 100644 --- a/src/services/models_service.py +++ b/src/services/models_service.py @@ -30,6 +30,7 @@ class ModelsService: "o3-pro", "o4-mini", "o4-mini-high", + "claude-opus-4-5-20251101", ] ANTHROPIC_MODELS = [ From d95869c2de2cb51ca6b2a2c7a2f96562f3aa678d Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Mon, 2 Mar 2026 15:37:17 -0500 Subject: [PATCH 17/43] feat: implement OpenRAG image management and cleanup functionality --- Makefile | 24 +- src/tui/managers/container_manager.py | 237 ++++++++----------- src/tui/utils/startup_checks.py | 79 ++++++- tests/unit/test_container_manager_cleanup.py | 85 +++++++ tests/unit/test_startup_checks_cleanup.py | 50 ++++ 5 files changed, 330 insertions(+), 145 deletions(-) create mode 100644 tests/unit/test_container_manager_cleanup.py create mode 100644 tests/unit/test_startup_checks_cleanup.py diff --git a/Makefile b/Makefile index 20d611226..122a020d6 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,7 @@ REPO ?= https://github.com/langflow-ai/langflow.git # Auto-detect container runtime: prefer docker, fall back to podman CONTAINER_RUNTIME := $(shell command -v docker >/dev/null 2>&1 && echo "docker" || echo "podman") +OPENRAG_IMAGE_REPOS := langflowai/openrag-backend langflowai/openrag-frontend langflowai/openrag-langflow langflowai/openrag-opensearch langflowai/openrag-dashboards langflow/langflow opensearchproject/opensearch opensearchproject/opensearch-dashboards # Only pass --env-file if the file 
actually exists ifneq (,$(wildcard $(ENV_FILE))) COMPOSE_CMD := $(CONTAINER_RUNTIME) compose --env-file $(ENV_FILE) @@ -74,6 +75,7 @@ endef ###################### .PHONY: help check_tools help_docker help_dev help_test help_local help_utils \ dev dev-cpu dev-local dev-local-cpu stop clean build logs \ + remove-openrag-images \ shell-backend shell-frontend install \ test test-unit test-integration test-ci test-ci-local test-sdk test-os-jwt lint \ backend frontend docling docling-stop install-be install-fe build-be build-fe build-os build-lf logs-be logs-fe logs-lf logs-os \ @@ -447,10 +449,24 @@ stop: ## Stop and remove all OpenRAG containers restart: stop dev ## Restart all containers +remove-openrag-images: ## Remove OpenRAG images only (safe for other projects) + @echo "$(YELLOW)Removing OpenRAG images only...$(NC)" + @removed=0; total=0; \ + for repo in $(OPENRAG_IMAGE_REPOS); do \ + ids=$$($(CONTAINER_RUNTIME) images "$$repo" -q 2>/dev/null | sort -u); \ + for id in $$ids; do \ + total=$$((total+1)); \ + if $(CONTAINER_RUNTIME) rmi -f "$$id" >/dev/null 2>&1; then \ + removed=$$((removed+1)); \ + fi; \ + done; \ + done; \ + echo "$(PURPLE)Removed $$removed/$$total OpenRAG image(s).$(NC)" + clean: stop ## Stop containers and remove volumes @echo "$(YELLOW)Cleaning up containers and volumes...$(NC)" $(COMPOSE_CMD) down -v --remove-orphans - $(CONTAINER_RUNTIME) system prune -f + @$(MAKE) remove-openrag-images @echo "$(PURPLE)Cleanup complete!$(NC)" factory-reset: ## Complete reset (stop, remove volumes, clear data, remove images) @@ -461,7 +477,7 @@ factory-reset: ## Complete reset (stop, remove volumes, clear data, remove image echo " - Delete opensearch-data directory"; \ echo " - Delete config directory"; \ echo " - Delete JWT keys (private_key.pem, public_key.pem)"; \ - echo " - Remove local OpenRAG images"; \ + echo " - Remove OpenRAG images"; \ echo ""; \ echo ""; \ if [ "$(FORCE)" != "true" ]; then \ @@ -492,8 +508,8 @@ factory-reset: ## Complete reset (stop, remove volumes, clear data, remove image rm -f keys/private_key.pem keys/public_key.pem; \ echo "$(PURPLE)JWT keys removed$(NC)"; \ fi; \ - echo "$(YELLOW)Cleaning up system...$(NC)"; \ - $(CONTAINER_RUNTIME) system prune -f; \ + echo "$(YELLOW)Removing OpenRAG images...$(NC)"; \ + $(MAKE) remove-openrag-images; \ echo ""; \ echo "$(PURPLE)Factory reset complete!$(NC)"; \ echo "$(CYAN)Run 'make dev' or 'make dev-cpu' to start fresh.$(NC)"; diff --git a/src/tui/managers/container_manager.py b/src/tui/managers/container_manager.py index 89c0c121d..91fccceac 100644 --- a/src/tui/managers/container_manager.py +++ b/src/tui/managers/container_manager.py @@ -74,6 +74,17 @@ def format_port_conflict_message(conflicts: List[tuple[str, int, str]], max_show class ContainerManager: """Manages Docker/Podman container lifecycle for OpenRAG.""" + OPENRAG_IMAGE_REPOS = { + "langflowai/openrag-backend", + "langflowai/openrag-frontend", + "langflowai/openrag-langflow", + "langflowai/openrag-opensearch", + "langflowai/openrag-dashboards", + "langflow/langflow", + "opensearchproject/opensearch", + "opensearchproject/opensearch-dashboards", + } + def __init__(self, compose_file: Optional[Path] = None): self.platform_detector = PlatformDetector() self.runtime_info = self.platform_detector.detect_runtime() @@ -106,6 +117,16 @@ def __init__(self, compose_file: Optional[Path] = None): "langflow": "langflow", } + @staticmethod + def _extract_repository(image_tag: str) -> str: + """Extract repository name from <repository>:<tag> image reference.""" + return 
image_tag.rsplit(":", 1)[0] if ":" in image_tag else image_tag + + def _is_openrag_repository(self, repository: str) -> bool: + """Check whether repository is OpenRAG-related, with optional registry prefix.""" + repo = repository.lower() + return any(repo == known or repo.endswith(f"/{known}") for known in self.OPENRAG_IMAGE_REPOS) + def _find_compose_file(self, filename: str) -> Path: """Find compose file in centralized TUI directory, current directory, or package resources.""" from utils.paths import get_tui_compose_file @@ -556,6 +577,51 @@ async def _run_runtime_command(self, args: List[str]) -> tuple[bool, str, str]: except Exception as e: return False, "", f"Command execution failed: {e}" + async def _list_openrag_images( + self, include_created: bool = False + ) -> tuple[bool, List[Dict[str, str]], str]: + """List OpenRAG-related images available in the container runtime.""" + format_parts = ["{{.Repository}}:{{.Tag}}", "{{.ID}}"] + if include_created: + format_parts.append("{{.CreatedAt}}") + + success, stdout, stderr = await self._run_runtime_command( + ["images", "--format", "\t".join(format_parts)] + ) + if not success: + return False, [], stderr + + images: List[Dict[str, str]] = [] + for raw_line in stdout.strip().splitlines(): + if not raw_line.strip(): + continue + + parts = raw_line.split("\t") + if len(parts) < 2: + continue + + image_tag = parts[0].strip() + image_id = parts[1].strip() + + # Skip untagged entries to avoid broad cleanup touching unrelated images. + if "" in image_tag: + continue + + repository = self._extract_repository(image_tag) + if not self._is_openrag_repository(repository): + continue + + image_data = { + "full_tag": image_tag, + "repo": repository, + "id": image_id, + } + if include_created and len(parts) >= 3: + image_data["created"] = parts[2].strip() + images.append(image_data) + + return True, images, "" + def _process_service_json( self, service: Dict, services: Dict[str, ServiceInfo] ) -> None: @@ -1243,15 +1309,37 @@ async def reset_services(self) -> AsyncIterator[tuple[bool, str]]: yield False, f"Failed to stop services: {stderr}" return - yield False, "Cleaning up container data..." + yield False, "Removing OpenRAG images..." + success, images, stderr = await self._list_openrag_images() + if not success: + yield False, f"Failed to list OpenRAG images: {stderr}" + return + + if not images: + yield True, "System reset completed - OpenRAG containers and volumes removed" + return - # Additional cleanup - remove any remaining containers/volumes - # This is more thorough than just compose down - await self._run_runtime_command(["system", "prune", "-f"]) + # Deduplicate by image ID (same ID can have multiple tags) + image_ids = [] + seen = set() + for image in images: + image_id = image["id"] + if image_id in seen: + continue + seen.add(image_id) + image_ids.append((image_id, image["full_tag"])) + + removed = 0 + for image_id, image_tag in image_ids: + success, _, stderr = await self._run_runtime_command(["rmi", image_id]) + if success: + removed += 1 + else: + yield False, f"Could not remove {image_tag}: {stderr.strip()}" yield ( True, - "System reset completed - all containers, volumes, and local images removed", + f"System reset completed - removed {removed} OpenRAG image(s)", ) async def get_service_logs( @@ -1391,7 +1479,6 @@ async def prune_old_images(self) -> AsyncIterator[tuple[bool, str]]: 2. Identifies OpenRAG-related images (openrag-backend, openrag-frontend, langflow, opensearch, dashboards) 3. 
For each repository, keeps only the latest/currently used image 4. Removes old images - 5. Prunes dangling images Yields: Tuples of (success, message) for progress updates yield False, "Scanning for OpenRAG images..." - # Get list of all images - success, stdout, stderr = await self._run_runtime_command( - ["images", "--format", "{{.Repository}}:{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}"] - ) - + success, images, stderr = await self._list_openrag_images(include_created=True) if not success: yield False, f"Failed to list images: {stderr}" return - # Parse images and group by repository - openrag_repos = { - "langflowai/openrag-backend", - "langflowai/openrag-frontend", - "langflowai/openrag-langflow", - "langflowai/openrag-opensearch", - "langflowai/openrag-dashboards", - "langflow/langflow", # Also include base langflow images - "opensearchproject/opensearch", - "opensearchproject/opensearch-dashboards", - } - images_by_repo = {} - for line in stdout.strip().split("\n"): - if not line.strip(): - continue - - parts = line.split("\t") - if len(parts) < 3: - continue - - image_tag, image_id, created_at = parts[0], parts[1], parts[2] - - # Skip <none> tags (dangling images will be handled separately) - if "<none>" in image_tag: - continue - - # Extract repository name (without tag) - if ":" in image_tag: - repo = image_tag.rsplit(":", 1)[0] - else: - repo = image_tag - - # Check if this is an OpenRAG-related image - if any(openrag_repo in repo for openrag_repo in openrag_repos): - if repo not in images_by_repo: - images_by_repo[repo] = [] - images_by_repo[repo].append({ - "full_tag": image_tag, - "id": image_id, - "created": created_at, - }) + for image in images: + repo = image["repo"] + if repo not in images_by_repo: + images_by_repo[repo] = [] + images_by_repo[repo].append(image) if not images_by_repo: yield True, "No OpenRAG images found to prune" - # Still run dangling image cleanup - yield False, "Cleaning up dangling images..." - success, stdout, stderr = await self._run_runtime_command( - ["image", "prune", "-f"] - ) - if success: - yield True, "Dangling images cleaned up" - else: - yield False, f"Failed to prune dangling images: {stderr}" return # Get currently used images (from running/stopped containers) @@ -1522,21 +1560,6 @@ async def prune_old_images(self) -> AsyncIterator[tuple[bool, str]]: else: yield True, "No old images were removed" - # Clean up dangling images (untagged images) - yield False, "Cleaning up dangling images..." - success, stdout, stderr = await self._run_runtime_command( - ["image", "prune", "-f"] - ) - - if success: - # Parse output to see if anything was removed - if stdout.strip(): - yield True, f"Dangling images cleaned: {stdout.strip()}" - else: - yield True, "No dangling images to clean" - else: - yield False, f"Failed to prune dangling images: {stderr}" - yield True, "Image pruning completed" async def prune_all_images(self) -> AsyncIterator[tuple[bool, str]]: """Remove ALL OpenRAG images to free disk space. This is a more aggressive pruning that: 1. Stops all running services 2. Removes ALL OpenRAG-related images (not just old versions) - 3. Prunes dangling images This frees up maximum disk space but requires re-downloading images on next start. @@ -1570,55 +1592,11 @@ async def prune_all_images(self) -> AsyncIterator[tuple[bool, str]]: yield False, "Scanning for OpenRAG images..." 
- # Get list of all images - success, stdout, stderr = await self._run_runtime_command( - ["images", "--format", "{{.Repository}}:{{.Tag}}\t{{.ID}}"] - ) - + success, images_to_remove, stderr = await self._list_openrag_images() if not success: yield False, f"Failed to list images: {stderr}" return - # Parse images and identify ALL OpenRAG-related images - openrag_repos = { - "langflowai/openrag-backend", - "langflowai/openrag-frontend", - "langflowai/openrag-langflow", - "langflowai/openrag-opensearch", - "langflowai/openrag-dashboards", - "langflow/langflow", - "opensearchproject/opensearch", - "opensearchproject/opensearch-dashboards", - } - - images_to_remove = [] - for line in stdout.strip().split("\n"): - if not line.strip(): - continue - - parts = line.split("\t") - if len(parts) < 2: - continue - - image_tag, image_id = parts[0], parts[1] - - # Skip <none> tags (will be handled by prune) - if "<none>" in image_tag: - continue - - # Extract repository name (without tag) - if ":" in image_tag: - repo = image_tag.rsplit(":", 1)[0] - else: - repo = image_tag - - # Check if this is an OpenRAG-related image - if any(openrag_repo in repo for openrag_repo in openrag_repos): - images_to_remove.append({ - "full_tag": image_tag, - "id": image_id, - }) - if not images_to_remove: yield True, "No OpenRAG images found to remove" else: @@ -1642,19 +1620,4 @@ async def prune_all_images(self) -> AsyncIterator[tuple[bool, str]]: else: yield False, "No images were removed" - # Clean up dangling images - yield False, "Cleaning up dangling images..." - success, stdout, stderr = await self._run_runtime_command( - ["image", "prune", "-f"] - ) - - if success: - if stdout.strip(): - yield True, f"Dangling images cleaned: {stdout.strip()}" - else: - yield True, "No dangling images to clean" - else: - yield False, f"Failed to prune dangling images: {stderr}" yield True, "All OpenRAG images removed successfully" - diff --git a/src/tui/utils/startup_checks.py b/src/tui/utils/startup_checks.py index 24aeb7da3..a7b3143f9 100644 --- a/src/tui/utils/startup_checks.py +++ b/src/tui/utils/startup_checks.py @@ -13,6 +13,16 @@ from pathlib import Path MIN_PODMAN_MEMORY_MB = 8192 # 8 GB minimum +OPENRAG_IMAGE_REPOS = { + "langflowai/openrag-backend", + "langflowai/openrag-frontend", + "langflowai/openrag-langflow", + "langflowai/openrag-opensearch", + "langflowai/openrag-dashboards", + "langflow/langflow", + "opensearchproject/opensearch", + "opensearchproject/opensearch-dashboards", +} # ============================================================================= @@ -97,6 +107,63 @@ def docker_is_podman() -> bool: return False +def _extract_repository(image_tag: str) -> str: + """Extract repository name from <repository>:<tag> image reference.""" + return image_tag.rsplit(":", 1)[0] if ":" in image_tag else image_tag + + +def _is_openrag_repository(repository: str) -> bool: + """Check whether repository is OpenRAG-related, with optional registry prefix.""" + repo = repository.lower() + return any(repo == known or repo.endswith(f"/{known}") for known in OPENRAG_IMAGE_REPOS) + + +def remove_openrag_images(runtime: str) -> tuple[int, int]: + """Remove only OpenRAG-related images for the given runtime.""" + result = subprocess.run( + [runtime, "images", "--format", "{{.Repository}}:{{.Tag}}\t{{.ID}}"], + capture_output=True, + text=True, + ) + if result.returncode != 0: + return 0, 0 + + image_ids: list[tuple[str, str]] = [] + seen_ids = set() + for raw_line in result.stdout.splitlines(): + if not raw_line.strip(): + continue + parts = 
raw_line.split("\t") + if len(parts) < 2: + continue + + image_tag = parts[0].strip() + image_id = parts[1].strip() + if "" in image_tag: + continue + + repository = _extract_repository(image_tag) + if not _is_openrag_repository(repository): + continue + + if image_id in seen_ids: + continue + seen_ids.add(image_id) + image_ids.append((image_id, image_tag)) + + removed = 0 + for image_id, _ in image_ids: + delete_result = subprocess.run( + [runtime, "rmi", "-f", image_id], + capture_output=True, + text=True, + ) + if delete_result.returncode == 0: + removed += 1 + + return removed, len(image_ids) + + # ============================================================================= # Runtime Detection # ============================================================================= @@ -432,7 +499,11 @@ def check_storage_corruption(runtime: str) -> Tuple[bool, Optional[str]]: def fix_storage_corruption(runtime: str, version: str) -> bool: """Reset storage to fix corruption.""" say("Storage corruption detected.") - if not ask_yes_no(f"Reset {runtime} storage? (WARNING: deletes all containers/images)"): + if runtime == "podman": + prompt = f"Reset {runtime} storage? (WARNING: deletes all containers/images)" + else: + prompt = "Remove OpenRAG Docker images? (OpenRAG images only)" + if not ask_yes_no(prompt): return False if runtime == "podman": @@ -460,9 +531,9 @@ def fix_storage_corruption(runtime: str, version: str) -> bool: say("Done.") return True else: - say("Pruning Docker system...") - subprocess.run(["docker", "system", "prune", "-af"], capture_output=True) - say("Done.") + say("Removing OpenRAG Docker images...") + removed, total = remove_openrag_images("docker") + say(f"Removed {removed}/{total} OpenRAG image(s).") return True diff --git a/tests/unit/test_container_manager_cleanup.py b/tests/unit/test_container_manager_cleanup.py new file mode 100644 index 000000000..0ca06097e --- /dev/null +++ b/tests/unit/test_container_manager_cleanup.py @@ -0,0 +1,85 @@ +"""Tests for OpenRAG-only image cleanup behavior in ContainerManager.""" + +from unittest.mock import AsyncMock + +import pytest + +from src.tui.managers.container_manager import ContainerManager + + +async def _collect(async_iterable): + """Collect all items from an async iterator into a list.""" + return [item async for item in async_iterable] + + +def _make_manager() -> ContainerManager: + """Create a minimal ContainerManager instance for unit tests.""" + manager = ContainerManager.__new__(ContainerManager) + return manager + + +@pytest.mark.asyncio +async def test_list_openrag_images_filters_non_openrag_and_dangling(): + manager = _make_manager() + manager._run_runtime_command = AsyncMock( + return_value=( + True, + ( + "langflowai/openrag-backend:latest\timg-openrag-1\n" + "docker.io/langflowai/openrag-frontend:v1\timg-openrag-2\n" + "library/ubuntu:latest\timg-ubuntu\n" + ":\timg-dangling\n" + ), + "", + ) + ) + + success, images, error = await manager._list_openrag_images() + + assert success is True + assert error == "" + assert [img["id"] for img in images] == ["img-openrag-1", "img-openrag-2"] + assert all("openrag" in img["full_tag"] for img in images) + + +@pytest.mark.asyncio +async def test_reset_services_removes_only_openrag_images_without_system_prune(): + manager = _make_manager() + manager._run_compose_command = AsyncMock(return_value=(True, "", "")) + manager._list_openrag_images = AsyncMock( + return_value=( + True, + [ + {"full_tag": "langflowai/openrag-backend:latest", "id": "img1"}, + {"full_tag": 
"langflowai/openrag-frontend:latest", "id": "img2"}, + ], + "", + ) + ) + manager._run_runtime_command = AsyncMock(return_value=(True, "", "")) + + updates = await _collect(manager.reset_services()) + + assert updates[-1] == ( + True, + "System reset completed - removed 2 OpenRAG image(s)", + ) + runtime_calls = [call.args[0] for call in manager._run_runtime_command.call_args_list] + assert runtime_calls == [["rmi", "img1"], ["rmi", "img2"]] + assert all(call[:2] != ["system", "prune"] for call in runtime_calls) + + +@pytest.mark.asyncio +async def test_reset_services_handles_no_openrag_images(): + manager = _make_manager() + manager._run_compose_command = AsyncMock(return_value=(True, "", "")) + manager._list_openrag_images = AsyncMock(return_value=(True, [], "")) + manager._run_runtime_command = AsyncMock() + + updates = await _collect(manager.reset_services()) + + assert updates[-1] == ( + True, + "System reset completed - OpenRAG containers and volumes removed", + ) + manager._run_runtime_command.assert_not_called() diff --git a/tests/unit/test_startup_checks_cleanup.py b/tests/unit/test_startup_checks_cleanup.py new file mode 100644 index 000000000..327b76d1f --- /dev/null +++ b/tests/unit/test_startup_checks_cleanup.py @@ -0,0 +1,50 @@ +"""Tests for OpenRAG-only image cleanup behavior in startup checks.""" + +from unittest.mock import MagicMock, patch + +from src.tui.utils import startup_checks + + +def _run_result(returncode: int = 0, stdout: str = "", stderr: str = "") -> MagicMock: + """Create a subprocess.run-like result mock.""" + result = MagicMock() + result.returncode = returncode + result.stdout = stdout + result.stderr = stderr + return result + + +def test_remove_openrag_images_filters_to_openrag_repos(): + with patch("src.tui.utils.startup_checks.subprocess.run") as mock_run: + mock_run.side_effect = [ + _run_result( + stdout=( + "langflowai/openrag-backend:latest\timg-openrag-1\n" + "docker.io/langflowai/openrag-frontend:v1\timg-openrag-2\n" + "library/ubuntu:latest\timg-ubuntu\n" + ":\timg-dangling\n" + ) + ), + _run_result(returncode=0), + _run_result(returncode=0), + ] + + removed, total = startup_checks.remove_openrag_images("docker") + + assert (removed, total) == (2, 2) + calls = [call.args[0] for call in mock_run.call_args_list] + assert calls[0] == ["docker", "images", "--format", "{{.Repository}}:{{.Tag}}\t{{.ID}}"] + assert ["docker", "rmi", "-f", "img-openrag-1"] in calls + assert ["docker", "rmi", "-f", "img-openrag-2"] in calls + assert all("img-ubuntu" not in call for call in calls) + + +def test_fix_storage_corruption_docker_avoids_system_prune(): + with patch("src.tui.utils.startup_checks.ask_yes_no", return_value=True), patch( + "src.tui.utils.startup_checks.remove_openrag_images", return_value=(1, 1) + ) as mock_remove, patch("src.tui.utils.startup_checks.subprocess.run") as mock_run: + ok = startup_checks.fix_storage_corruption(runtime="docker", version="26.1.0") + + assert ok is True + mock_remove.assert_called_once_with("docker") + mock_run.assert_not_called() From 07d48308f192920af9972178be0147f4220cfb99 Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Thu, 5 Mar 2026 13:41:41 -0500 Subject: [PATCH 18/43] fix: Enhance EnvManager to preserve unmanaged .env variables during save --- src/tui/managers/env_manager.py | 66 ++++++++++++++++++++++++++------- tests/unit/test_env_manager.py | 19 ++++++++++ 2 files changed, 72 insertions(+), 13 deletions(-) diff --git a/src/tui/managers/env_manager.py b/src/tui/managers/env_manager.py index 
b940f2a0e..3d707103e 100644 --- a/src/tui/managers/env_manager.py +++ b/src/tui/managers/env_manager.py @@ -1,6 +1,7 @@ """Environment configuration manager for OpenRAG TUI.""" import os +import re import secrets import string from dataclasses import dataclass, field @@ -155,19 +156,9 @@ def _quote_env_value(self, value: str) -> str: escaped_value = value.replace("'", "'\\''") return f"'{escaped_value}'" - def load_existing_env(self) -> bool: - """Load existing .env file if it exists, or fall back to environment variables. - - Uses python-dotenv's load_dotenv() for standard .env file parsing, which handles: - - Quoted values (single and double quotes) - - Variable expansion (${VAR}) - - Multiline values - - Escaped characters - - Comments - """ - # Map env vars to config attributes - # These are environment variable names, not actual secrets - attr_map = { # pragma: allowlist secret + def _env_attr_map(self) -> Dict[str, str]: + """Map env vars to EnvConfig attribute names.""" + return { # pragma: allowlist secret "OPENAI_API_KEY": "openai_api_key", # pragma: allowlist secret "ANTHROPIC_API_KEY": "anthropic_api_key", # pragma: allowlist secret "OLLAMA_ENDPOINT": "ollama_endpoint", @@ -212,6 +203,48 @@ def load_existing_env(self) -> bool: "LANGFUSE_HOST": "langfuse_host", } + def _collect_preserved_env_lines(self) -> list[str]: + """Collect existing .env assignments not managed by this TUI.""" + if not self.env_file.exists(): + return [] + + managed_vars = set(self._env_attr_map().keys()) + assignment_pattern = re.compile(r"^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=") + preserved_lines: list[str] = [] + + try: + for raw_line in self.env_file.read_text().splitlines(): + stripped = raw_line.strip() + if not stripped or stripped.startswith("#"): + continue + + match = assignment_pattern.match(raw_line) + if not match: + continue + + env_var = match.group(1) + if env_var in managed_vars: + continue + preserved_lines.append(raw_line) + except Exception: + logger.warning(f"Failed to preserve custom .env lines from {self.env_file}") + + return preserved_lines + + def load_existing_env(self) -> bool: + """Load existing .env file if it exists, or fall back to environment variables. 
+ + Uses python-dotenv's load_dotenv() for standard .env file parsing, which handles: + - Quoted values (single and double quotes) + - Variable expansion (${VAR}) + - Multiline values + - Escaped characters + - Comments + """ + # Map env vars to config attributes + # These are environment variable names, not actual secrets + attr_map = self._env_attr_map() + loaded_from_file = False # Load .env file using python-dotenv for standard parsing @@ -371,6 +404,7 @@ def save_env_file(self) -> bool: try: # Ensure secure defaults (including Langflow secret key) are set before saving self.setup_secure_defaults() + preserved_custom_lines = self._collect_preserved_env_lines() # Create timestamped backup if file exists if self.env_file.exists(): timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") @@ -552,6 +586,12 @@ def save_env_file(self) -> bool: if langfuse_written: f.write("\n") + if preserved_custom_lines: + f.write("# Preserved custom settings\n") + for line in preserved_custom_lines: + f.write(f"{line}\n") + f.write("\n") + f.flush() os.fsync(f.fileno()) diff --git a/tests/unit/test_env_manager.py b/tests/unit/test_env_manager.py index 8cc605781..267fb3e93 100644 --- a/tests/unit/test_env_manager.py +++ b/tests/unit/test_env_manager.py @@ -87,6 +87,25 @@ def test_backup_file_has_secure_permissions(self, env_manager, tmp_path): f"expected 0o600, got {oct(_perms(backups[0]))}" ) + def test_preserves_unmanaged_env_variables(self, env_manager, tmp_path): + """Saving config must keep existing .env keys that TUI does not manage.""" + env_file = tmp_path / ".env" + env_file.write_text( + "OPENRAG_BACKEND_HOST='my-host'\n" + "OPENSEARCH_PASSWORD='old-password'\n" + ) + + env_manager.config.opensearch_password = "NewSecurePass!123" + + with patch("tui.utils.version_check.get_current_version", return_value="1.0.0"): + result = env_manager.save_env_file() + + assert result is True + content = env_file.read_text() + assert "OPENRAG_BACKEND_HOST='my-host'" in content + assert content.count("OPENRAG_BACKEND_HOST=") == 1 + assert "OPENSEARCH_PASSWORD='NewSecurePass!123'" in content + # --------------------------------------------------------------------------- # ensure_openrag_version From f29e35d13b89d116e93931ffc9fb7d24d77c4b66 Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Thu, 5 Mar 2026 13:57:19 -0500 Subject: [PATCH 19/43] fix: Update EnvManager to use class-level regex for environment variable assignment --- src/tui/managers/env_manager.py | 9 ++++++--- tests/unit/test_env_manager.py | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/tui/managers/env_manager.py b/src/tui/managers/env_manager.py index 3d707103e..ad467d6bd 100644 --- a/src/tui/managers/env_manager.py +++ b/src/tui/managers/env_manager.py @@ -120,6 +120,7 @@ def __init__(self, env_file: Optional[Path] = None): logger.warning(f"Failed to migrate .env file: {e}") self.config = EnvConfig() + self.assignment_pattern = re.compile(r"^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=") def generate_secure_password(self) -> str: """Generate a secure password for OpenSearch.""" @@ -209,7 +210,6 @@ def _collect_preserved_env_lines(self) -> list[str]: return [] managed_vars = set(self._env_attr_map().keys()) - assignment_pattern = re.compile(r"^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=") preserved_lines: list[str] = [] try: @@ -218,7 +218,7 @@ def _collect_preserved_env_lines(self) -> list[str]: if not stripped or stripped.startswith("#"): continue - match = assignment_pattern.match(raw_line) + match = 
self.assignment_pattern.match(raw_line) if not match: continue @@ -227,7 +227,10 @@ def _collect_preserved_env_lines(self) -> list[str]: continue preserved_lines.append(raw_line) except Exception: - logger.warning(f"Failed to preserve custom .env lines from {self.env_file}") + logger.warning( + f"Failed to preserve custom .env lines from {self.env_file}", + exc_info=True, + ) return preserved_lines diff --git a/tests/unit/test_env_manager.py b/tests/unit/test_env_manager.py index 267fb3e93..d81d80daf 100644 --- a/tests/unit/test_env_manager.py +++ b/tests/unit/test_env_manager.py @@ -105,6 +105,7 @@ def test_preserves_unmanaged_env_variables(self, env_manager, tmp_path): assert "OPENRAG_BACKEND_HOST='my-host'" in content assert content.count("OPENRAG_BACKEND_HOST=") == 1 assert "OPENSEARCH_PASSWORD='NewSecurePass!123'" in content + assert "OPENSEARCH_PASSWORD='old-password'" not in content # --------------------------------------------------------------------------- From 4b884ffce1d7f7c6506bfe8355677a6af0b40635 Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Thu, 5 Mar 2026 16:14:58 -0500 Subject: [PATCH 20/43] Update src/tui/utils/startup_checks.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/tui/utils/startup_checks.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/tui/utils/startup_checks.py b/src/tui/utils/startup_checks.py index a7b3143f9..d4ecbe1bd 100644 --- a/src/tui/utils/startup_checks.py +++ b/src/tui/utils/startup_checks.py @@ -534,6 +534,17 @@ def fix_storage_corruption(runtime: str, version: str) -> bool: say("Removing OpenRAG Docker images...") removed, total = remove_openrag_images("docker") say(f"Removed {removed}/{total} OpenRAG image(s).") + # Treat outcomes based on how many images were removed vs. found. + if total == 0: + # No OpenRAG images found; nothing to clean up, but this is not an error. + return True + if removed < total: + # We attempted cleanup but some images could not be removed. + say( + "Warning: Some OpenRAG Docker images could not be removed. " + "Please try removing them manually or check your Docker installation." 
+ ) + return False return True From 35f15c65d9085042c450dcdefa3536fa1ffcf54f Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Thu, 5 Mar 2026 16:16:29 -0500 Subject: [PATCH 21/43] Update tests/unit/test_container_manager_cleanup.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- tests/unit/test_container_manager_cleanup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_container_manager_cleanup.py b/tests/unit/test_container_manager_cleanup.py index 0ca06097e..0a9e157e2 100644 --- a/tests/unit/test_container_manager_cleanup.py +++ b/tests/unit/test_container_manager_cleanup.py @@ -4,7 +4,7 @@ import pytest -from src.tui.managers.container_manager import ContainerManager +from tui.managers.container_manager import ContainerManager async def _collect(async_iterable): From f844a858ae17f0982baa26f4cdd136211868d4b9 Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Thu, 5 Mar 2026 16:20:19 -0500 Subject: [PATCH 22/43] Update Makefile Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 122a020d6..b54f6164b 100644 --- a/Makefile +++ b/Makefile @@ -449,8 +449,8 @@ stop: ## Stop and remove all OpenRAG containers restart: stop dev ## Restart all containers -remove-openrag-images: ## Remove OpenRAG images only (safe for other projects) - @echo "$(YELLOW)Removing OpenRAG images only...$(NC)" +remove-openrag-images: ## Remove OpenRAG-related images and dependencies (may affect other projects using shared images) + @echo "$(YELLOW)Removing OpenRAG-related images and dependencies...$(NC)" @removed=0; total=0; \ for repo in $(OPENRAG_IMAGE_REPOS); do \ ids=$$($(CONTAINER_RUNTIME) images "$$repo" -q 2>/dev/null | sort -u); \ From bb7947117501e6c9e7a7e479fe56905ab632c85d Mon Sep 17 00:00:00 2001 From: Rico Furtado Date: Thu, 5 Mar 2026 16:23:45 -0500 Subject: [PATCH 23/43] Update tests/unit/test_env_manager.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- tests/unit/test_env_manager.py | 54 ++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/tests/unit/test_env_manager.py b/tests/unit/test_env_manager.py index d81d80daf..49dfcfe08 100644 --- a/tests/unit/test_env_manager.py +++ b/tests/unit/test_env_manager.py @@ -107,7 +107,61 @@ def test_preserves_unmanaged_env_variables(self, env_manager, tmp_path): assert "OPENSEARCH_PASSWORD='NewSecurePass!123'" in content assert "OPENSEARCH_PASSWORD='old-password'" not in content + def test_preserves_unmanaged_multiline_quoted_value( + self, env_manager, tmp_path + ): + """Unmanaged python-dotenv–style multiline quoted values are preserved.""" + env_file = tmp_path / ".env" + multiline_block = ( + 'UNMANAGED_MULTILINE="line1\\n' + "line2\\n" + 'line3"\n' + ) + env_file.write_text( + multiline_block + + 'OPENSEARCH_PASSWORD="old-password"\n' + ) + env_manager.config.opensearch_password = "NewSecurePass!456" + + with patch("tui.utils.version_check.get_current_version", return_value="1.0.0"): + result = env_manager.save_env_file() + + assert result is True + content = env_file.read_text() + # Multiline unmanaged value should be preserved exactly once. + assert multiline_block in content + assert content.count("UNMANAGED_MULTILINE=") == 1 + # Managed password should be updated, not duplicated. 
+ assert 'OPENSEARCH_PASSWORD="NewSecurePass!456"' in content + assert 'OPENSEARCH_PASSWORD="old-password"' not in content + + def test_preserves_unmanaged_continued_line(self, env_manager, tmp_path): + """Unmanaged values using backslash continuation are preserved.""" + env_file = tmp_path / ".env" + continued_block = ( + "UNMANAGED_LONG_VALUE=first part \\\n" + " second part \\\n" + " third part\n" + ) + env_file.write_text( + continued_block + + 'OPENSEARCH_PASSWORD="old-password"\n' + ) + + env_manager.config.opensearch_password = "AnotherNewPass!789" + + with patch("tui.utils.version_check.get_current_version", return_value="1.0.0"): + result = env_manager.save_env_file() + + assert result is True + content = env_file.read_text() + # Continued-line unmanaged value should be preserved exactly once. + assert continued_block in content + assert content.count("UNMANAGED_LONG_VALUE=") == 1 + # Managed password should be updated, not duplicated. + assert 'OPENSEARCH_PASSWORD="AnotherNewPass!789"' in content + assert 'OPENSEARCH_PASSWORD="old-password"' not in content # --------------------------------------------------------------------------- # ensure_openrag_version # --------------------------------------------------------------------------- From cbdb9e935885d7ea9ace06d020d90bbe8430f689 Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Mar 2026 12:30:41 +0200 Subject: [PATCH 24/43] add index_name to SettingsUpdateOptions --- sdks/python/openrag_sdk/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sdks/python/openrag_sdk/models.py b/sdks/python/openrag_sdk/models.py index 6496332ac..607de7121 100644 --- a/sdks/python/openrag_sdk/models.py +++ b/sdks/python/openrag_sdk/models.py @@ -178,6 +178,7 @@ class SettingsUpdateOptions(BaseModel): table_structure: bool | None = None ocr: bool | None = None picture_descriptions: bool | None = None + index_name: str | None = None class SettingsUpdateResponse(BaseModel): From a51778bd8ccf510f1b7354db90e3b2b1d225795e Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Mar 2026 14:20:15 +0200 Subject: [PATCH 25/43] feat(sdk): add filename_exists method to SDK and v1 API endpoint - Add FilenameExistsResponse model to SDK - Add filename_exists() async method to DocumentsClient - Add GET /v1/documents/check-filename endpoint with API key auth - Export FilenameExistsResponse in SDK __init__.py This enables SDK users to check if a file exists in the knowledge base before ingestion, avoiding duplicate uploads. 
--- sdks/python/openrag_sdk/__init__.py | 2 ++ sdks/python/openrag_sdk/documents.py | 22 +++++++++++++++ sdks/python/openrag_sdk/models.py | 6 +++++ src/api/v1/documents.py | 40 ++++++++++++++++++++++++++++ src/main.py | 1 + 5 files changed, 71 insertions(+) diff --git a/sdks/python/openrag_sdk/__init__.py b/sdks/python/openrag_sdk/__init__.py index 3d19d3d10..287067cf7 100644 --- a/sdks/python/openrag_sdk/__init__.py +++ b/sdks/python/openrag_sdk/__init__.py @@ -49,6 +49,7 @@ DeleteDocumentResponse, DeleteKnowledgeFilterResponse, DoneEvent, + FilenameExistsResponse, GetKnowledgeFilterResponse, IngestResponse, KnowledgeFilter, @@ -94,6 +95,7 @@ "SearchFilters", "IngestResponse", "DeleteDocumentResponse", + "FilenameExistsResponse", "Conversation", "ConversationDetail", "ConversationListResponse", diff --git a/sdks/python/openrag_sdk/documents.py b/sdks/python/openrag_sdk/documents.py index 867fbc9c7..9c4f333e6 100644 --- a/sdks/python/openrag_sdk/documents.py +++ b/sdks/python/openrag_sdk/documents.py @@ -144,3 +144,25 @@ async def delete(self, filename: str) -> DeleteDocumentResponse: data = response.json() return DeleteDocumentResponse(**data) + + async def filename_exists(self, filename: str) -> bool: + """ + Check if a filename exists in the knowledge base. + + Args: + filename: Name of the file to check. + + Returns: + True if the file exists, False otherwise. + """ + from .models import FilenameExistsResponse + + response = await self._client._request( + "GET", + "/api/v1/documents/check-filename", + params={"filename": filename}, + ) + + data = response.json() + exists_response = FilenameExistsResponse(**data) + return exists_response.exists diff --git a/sdks/python/openrag_sdk/models.py b/sdks/python/openrag_sdk/models.py index 607de7121..94530f4e2 100644 --- a/sdks/python/openrag_sdk/models.py +++ b/sdks/python/openrag_sdk/models.py @@ -97,6 +97,12 @@ class DeleteDocumentResponse(BaseModel): deleted_chunks: int = 0 +class FilenameExistsResponse(BaseModel): + """Response from checking if a filename exists.""" + + exists: bool + + # Chat history models class Message(BaseModel): """A message in a conversation.""" diff --git a/src/api/v1/documents.py b/src/api/v1/documents.py index 6876979f1..8033bb2b4 100644 --- a/src/api/v1/documents.py +++ b/src/api/v1/documents.py @@ -111,3 +111,43 @@ async def delete_document_endpoint( return JSONResponse({"error": error_msg}, status_code=403) else: return JSONResponse({"error": error_msg}, status_code=500) + + +async def check_filename_exists_endpoint( + filename: str, + session_manager=Depends(get_session_manager), + user: User = Depends(get_api_key_user_async), +): + """Check if a document with a specific filename exists. 
GET /v1/documents/check-filename""" + from config.settings import get_index_name + from utils.opensearch_queries import build_filename_search_body + + try: + opensearch_client = session_manager.get_user_opensearch_client(user.user_id, None) + search_body = build_filename_search_body(filename, size=1, source=["filename"]) + + logger.debug("Checking filename existence", filename=filename, index_name=get_index_name()) + + try: + response = await opensearch_client.search( + index=get_index_name(), + body=search_body + ) + except Exception as search_err: + if "index_not_found_exception" in str(search_err): + logger.info("Index does not exist, file does not exist") + return JSONResponse({"exists": False, "filename": filename}, status_code=200) + raise + + hits = response.get("hits", {}).get("hits", []) + exists = len(hits) > 0 + + return JSONResponse({"exists": exists, "filename": filename}, status_code=200) + + except Exception as e: + logger.error("Error checking filename existence", filename=filename, error=str(e)) + error_str = str(e) + if "AuthenticationException" in error_str or "access denied" in error_str.lower(): + return JSONResponse({"error": "Access denied: insufficient permissions"}, status_code=403) + else: + return JSONResponse({"error": str(e)}, status_code=500) diff --git a/src/main.py b/src/main.py index 598263f78..0443d6823 100644 --- a/src/main.py +++ b/src/main.py @@ -835,6 +835,7 @@ async def create_app(): app.add_api_route("/v1/documents/ingest", v1_documents.ingest_endpoint, methods=["POST"], tags=["public"]) app.add_api_route("/v1/tasks/{task_id}", v1_documents.task_status_endpoint, methods=["GET"], tags=["public"]) app.add_api_route("/v1/documents", v1_documents.delete_document_endpoint, methods=["DELETE"], tags=["public"]) + app.add_api_route("/v1/documents/check-filename", v1_documents.check_filename_exists_endpoint, methods=["GET"], tags=["public"]) # Settings endpoints app.add_api_route("/v1/settings", v1_settings.get_settings_endpoint, methods=["GET"], tags=["public"]) From 21aecf01289516a7958e6d943727f383c7f6dc39 Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Mar 2026 14:20:53 +0200 Subject: [PATCH 26/43] sdk version to 0.1.5 --- sdks/python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index 1df13f010..b9c8d819b 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "openrag-sdk" -version = "0.1.4" +version = "0.1.5" description = "Official Python SDK for OpenRAG API" readme = "README.md" license = "MIT" From 4a1a39af4ec319856cba7d5ce5d583a7f39af12d Mon Sep 17 00:00:00 2001 From: matano Date: Tue, 10 Feb 2026 10:31:39 +0200 Subject: [PATCH 27/43] update openai_api_base when updating the template --- src/services/flows_service.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/services/flows_service.py b/src/services/flows_service.py index 2d147353d..9a0614b69 100644 --- a/src/services/flows_service.py +++ b/src/services/flows_service.py @@ -1398,6 +1398,12 @@ async def _update_component_fields( template["api_base"]["show"] = True template["api_base"]["advanced"] = False updated = True + if provider == "openai" and "openai_api_base" in template: + template["openai_api_base"]["value"] = "OPENAI_API_BASE" + template["openai_api_base"]["load_from_db"] = True + template["openai_api_base"]["show"] = True + template["openai_api_base"]["advanced"] = False + updated = True if 
provider == "anthropic" and "api_key" in template: template["api_key"]["value"] = "ANTHROPIC_API_KEY" From a1beeb210ce8611a912d781025029cee5a4cef56 Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 23 Feb 2026 10:58:49 +0200 Subject: [PATCH 28/43] fix for redundant /v1 extension --- src/api/provider_validation.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index 458190193..c2eb1794c 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -141,6 +141,11 @@ async def validate_provider_setup( if provider == "openai" and not endpoint: endpoint = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") + + # Strip /v1 suffix from OpenAI endpoint if present to avoid double /v1 paths + if provider == "openai" and endpoint and endpoint.endswith("/v1"): + endpoint = endpoint.rstrip("/v1") + logger.info(f"Stripped /v1 suffix from OpenAI endpoint: {endpoint}") if test_completion: # Full validation with completion/embedding tests (consumes credits) From b49315f5dbb91c546f37819306c6439e595ea6b6 Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Mar 2026 14:46:21 +0200 Subject: [PATCH 29/43] update OPEN_API_BASE env variable when the endpoint is loaded from the config --- src/config/settings.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/config/settings.py b/src/config/settings.py index 78cf03a65..e82db0d20 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -415,6 +415,10 @@ def patched_async_client(self): if config.providers.openai.api_key: os.environ["OPENAI_API_KEY"] = config.providers.openai.api_key logger.debug("Loaded OpenAI API key from config") + + if config.providers.openai.endpoint: + os.environ["OPENAI_API_BASE"] = config.providers.openai.endpoint + logger.debug(f"Loaded OpenAI endpoint from config: '{config.providers.openai.endpoint}'") # Set Anthropic credentials if config.providers.anthropic.api_key: From 8fc4ce3c8dbc73508dcdea641a3ebe9d184c157e Mon Sep 17 00:00:00 2001 From: matano Date: Mon, 9 Mar 2026 15:16:02 +0200 Subject: [PATCH 30/43] update api_base setup in get_openai_models --- src/api/models.py | 28 ++++++++++++---------------- src/services/models_service.py | 2 -- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/api/models.py b/src/api/models.py index ce6865796..be3a084b4 100644 --- a/src/api/models.py +++ b/src/api/models.py @@ -1,3 +1,4 @@ +import os from typing import Optional from fastapi import Depends @@ -45,23 +46,18 @@ async def get_openai_models( {"error": "OpenAI API key is required either in request body or in configuration"}, status_code=400, ) - - if not api_base: - try: - config = get_openrag_config() - api_base = config.providers.openai.endpoint - logger.info( - f"Retrieved OpenAI API base from config: {'yes' if api_base else 'no'}" - ) - except Exception as e: - logger.error(f"Failed to get config: {e}") - if not api_base: - return JSONResponse( - { - "error": "OpenAI API base is required either in request body or in configuration" - }, - status_code=400, + + api_base = None + try: + config = get_openrag_config() + api_base = config.providers.openai.endpoint + logger.info( + f"Retrieved OpenAI API base from config: {'yes' if api_base else 'no'}" ) + except Exception as e: + logger.error(f"Failed to get config: {e}") + if not api_base: + api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com") models = await models_service.get_openai_models(api_key=api_key, api_base=api_base) return 
JSONResponse(models) diff --git a/src/services/models_service.py b/src/services/models_service.py index 218275eb2..95271ae0b 100644 --- a/src/services/models_service.py +++ b/src/services/models_service.py @@ -1,5 +1,3 @@ -import os - import httpx from typing import Dict, List from config.model_constants import ( From 60a8639f27a3aede11dd498a04d3844e8d9f7910 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Mon, 9 Mar 2026 11:51:42 -0500 Subject: [PATCH 31/43] Update CI runner labels to ephemeral Switch runs-on from an array to a labels mapping and update the self-hosted runner label to "langflow-ai-arm64-40gb-ephemeral". This ensures the workflow targets the ephemeral ARM64 self-hosted runner while leaving existing environment variables unchanged. --- .github/workflows/test-integration.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index 3cfbccdce..4dd3485e3 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -20,7 +20,8 @@ on: jobs: tests: - runs-on: [self-hosted, linux, ARM64, langflow-ai-arm64-40gb] + runs-on: + labels: ["self-hosted", "linux", "ARM64", "langflow-ai-arm64-40gb-ephemeral"] env: # Prefer repository/environment variable first, then secret, then a sane fallback OPENSEARCH_PASSWORD: ${{ vars.OPENSEARCH_PASSWORD || secrets.OPENSEARCH_PASSWORD || 'OpenRag#2025!' }} From ba103007a5e715bc23a9ef35c005f58470ccab75 Mon Sep 17 00:00:00 2001 From: Mike Pawlowski Date: Fri, 27 Feb 2026 12:55:48 -0800 Subject: [PATCH 32/43] feat: Re-affirm Python 3.13 minimum version required for OpenRAG app via PyPI classifiers Issues - #1023 Summary Updated PyPI classifiers across core package and SDKs. Package Metadata - Added classifiers block to the root pyproject.toml, including development status, environment, audience, license, Python version, and topic classifiers - Added "Programming Language :: Python :: 3 :: Only" classifier to the MCP SDK and Python SDK pyproject.toml files to explicitly indicate Python 3 exclusivity - Added "Programming Language :: Python :: 3.13" classifier to the MCP SDK to reflect supported version coverage - Added "Topic :: Scientific/Engineering :: Artificial Intelligence" classifier to the MCP SDK and Python SDK - Added "Topic :: Software Development :: Libraries :: Python Modules" classifier to the MCP SDK --- pyproject.toml | 12 ++++++++++++ sdks/mcp/pyproject.toml | 4 ++++ sdks/python/pyproject.toml | 2 ++ 3 files changed, 18 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 422c052bd..dea4dd379 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,6 +8,18 @@ version = "0.2.5" description = "OpenRAG is a comprehensive Retrieval-Augmented Generation platform that enables intelligent document search and AI-powered conversations." 
readme = "README.md" requires-python = ">=3.13" +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.13", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries :: Python Modules", +] dependencies = [ "agentd>=0.2.2", "aiofiles>=24.1.0", diff --git a/sdks/mcp/pyproject.toml b/sdks/mcp/pyproject.toml index d5c371857..578268ee0 100644 --- a/sdks/mcp/pyproject.toml +++ b/sdks/mcp/pyproject.toml @@ -12,9 +12,13 @@ classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ "mcp>=1.0.0", diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index 1df13f010..6b9e4a5a6 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -17,10 +17,12 @@ classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed", ] From 2af139835bd85e33aea487aad2a16ba961a1ec4b Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Mon, 9 Mar 2026 16:23:10 -0500 Subject: [PATCH 33/43] feat: upgrade langflow version (#682) * Update Dockerfile.langflow * change to 1.7.1 * Make update compatible with new Model Providers structure for updating the components * Update to latest nightly * remove unused tweaks, add step to enable models, add step to set watsonx url * changed flows to be compatible with 1.8 * Bump langflow base image to 1.8.0.dev72 Update Dockerfile.langflow to use langflowai/langflow-nightly:1.8.0.dev72 instead of 1.7.1.dev14 to pick up the latest nightly changes and fixes. No other changes were made. * fixed search service getting filename by keyword * Fixed OpenRAG URL MCP flow to work as a tool for the agent * fixed openrag docs filter not being created * added make dev local build lf to build just langflow * updated langflow version * updated url openrag * updated langflow to latest stable version * added pip install uv on langflow dockerfile * Update Dockerfile.langflow * Remove 'None' defaults from env vars Update docker-compose.yml to stop injecting the literal string 'None' for missing API credentials. OPENAI_API_KEY, ANTHROPIC_API_KEY, WATSONX_API_KEY, WATSONX_PROJECT_ID, and OLLAMA_BASE_URL now expand without a 'None' fallback so they are empty/unset if not provided. WATSONX_URL now falls back to WATSONX_ENDPOINT when unset. This prevents services from receiving the string 'None' and improves handling of absent credentials. 
* update default of select embedding model to text embedding 3 * updated openrag agent flow * removed None from openrag agent * Update settings to pass correct Ollama URL to langflow and to set global variables * updated to reset when not provider * Implement tasks id * updated flows service to update component before choosing the model --------- Co-authored-by: Lucas Oliveira Co-authored-by: Lucas Oliveira <62335616+lucaseduoli@users.noreply.github.com> --- Dockerfile.langflow | 4 +- Makefile | 31 +- docker-compose.yml | 16 +- flows/ingestion_flow.json | 4190 ++++++++++------- flows/openrag_agent.json | 2347 +++++---- flows/openrag_nudges.json | 2899 +++++++----- flows/openrag_url_mcp.json | 3220 +++++++------ .../api/mutations/useOnboardingMutation.ts | 1 + .../_components/onboarding-card.tsx | 98 +- .../templates/langflow/deployment.yaml | 4 +- kubernetes/helm/openrag/values.yaml | 2 +- src/api/settings.py | 39 +- src/config/settings.py | 1 + src/main.py | 25 +- src/models/processors.py | 16 - src/services/flows_service.py | 273 +- src/services/langflow_file_service.py | 19 +- src/services/search_service.py | 2 +- src/utils/container_utils.py | 9 +- src/utils/langflow_headers.py | 4 +- 20 files changed, 7595 insertions(+), 5605 deletions(-) diff --git a/Dockerfile.langflow b/Dockerfile.langflow index aaa9f5d68..2f8286cd0 100644 --- a/Dockerfile.langflow +++ b/Dockerfile.langflow @@ -1,4 +1,6 @@ -FROM langflowai/langflow-nightly:1.7.0.dev21 +FROM langflowai/langflow:1.8.0 + +RUN pip install uv EXPOSE 7860 diff --git a/Makefile b/Makefile index b54f6164b..ac37c4a9a 100644 --- a/Makefile +++ b/Makefile @@ -74,8 +74,7 @@ endef # PHONY TARGETS ###################### .PHONY: help check_tools help_docker help_dev help_test help_local help_utils \ - dev dev-cpu dev-local dev-local-cpu stop clean build logs \ - remove-openrag-images \ + dev dev-cpu dev-local dev-local-cpu dev-local-build-lf dev-local-build-lf-cpu stop clean build logs \ shell-backend shell-frontend install \ test test-unit test-integration test-ci test-ci-local test-sdk test-os-jwt lint \ backend frontend docling docling-stop install-be install-fe build-be build-fe build-os build-lf logs-be logs-fe logs-lf logs-os \ @@ -179,6 +178,8 @@ help_dev: ## Show development environment commands @echo "$(PURPLE)Infrastructure Only:$(NC)" @echo " $(PURPLE)make dev-local$(NC) - Start infrastructure only (for local backend/frontend)" @echo " $(PURPLE)make dev-local-cpu$(NC) - Start infrastructure for local backend/frontend with CPU only" + @echo " $(PURPLE)make dev-local-build-lf$(NC) - Start infrastructure, building only Langflow image" + @echo " $(PURPLE)make dev-local-build-lf-cpu$(NC) - Same as above, with CPU only" @echo '' @echo "$(PURPLE)Branch Development (build Langflow from source):$(NC)" @echo " $(PURPLE)make dev-branch$(NC) - Build & run with custom Langflow branch" @@ -360,6 +361,32 @@ dev-local-cpu: ## Start infrastructure for local development, with CPU only @echo "" @echo "$(YELLOW)Now run 'make backend' and 'make frontend' in separate terminals$(NC)" +dev-local-build-lf: ## Start infrastructure for local development, building only Langflow image + @echo "$(YELLOW)Building Langflow image...$(NC)" + $(COMPOSE_CMD) -f docker-compose.yml -f docker-compose.gpu.yml build langflow + @echo "$(YELLOW)Starting infrastructure only (for local development)...$(NC)" + $(COMPOSE_CMD) -f docker-compose.yml -f docker-compose.gpu.yml up -d opensearch openrag-backend dashboards langflow + @echo "$(PURPLE)Infrastructure started!$(NC)" + @echo " 
$(CYAN)Backend:$(NC) http://openrag-backend" + @echo " $(CYAN)Langflow:$(NC) http://localhost:7860" + @echo " $(CYAN)OpenSearch:$(NC) http://localhost:9200" + @echo " $(CYAN)Dashboards:$(NC) http://localhost:5601" + @echo "" + @echo "$(YELLOW)Now run 'make backend' and 'make frontend' in separate terminals$(NC)" + +dev-local-build-lf-cpu: ## Start infrastructure for local development, building only Langflow image with CPU only + @echo "$(YELLOW)Building Langflow image (CPU)...$(NC)" + $(COMPOSE_CMD) build langflow + @echo "$(YELLOW)Starting infrastructure only (for local development)...$(NC)" + $(COMPOSE_CMD) up -d opensearch openrag-backend dashboards langflow + @echo "$(PURPLE)Infrastructure started!$(NC)" + @echo " $(CYAN)Backend:$(NC) http://openrag-backend" + @echo " $(CYAN)Langflow:$(NC) http://localhost:7860" + @echo " $(CYAN)OpenSearch:$(NC) http://localhost:9200" + @echo " $(CYAN)Dashboards:$(NC) http://localhost:5601" + @echo "" + @echo "$(YELLOW)Now run 'make backend' and 'make frontend' in separate terminals$(NC)" + ###################### # BRANCH DEVELOPMENT ###################### diff --git a/docker-compose.yml b/docker-compose.yml index b705ee106..0aef3a172 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -131,12 +131,12 @@ services: - LANGFUSE_PUBLIC_KEY=${LANGFUSE_PUBLIC_KEY:-} - LANGFUSE_HOST=${LANGFUSE_HOST:-} - LANGFLOW_DEACTIVATE_TRACING - - OPENAI_API_KEY=${OPENAI_API_KEY:-None} - - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-None} - - WATSONX_API_KEY=${WATSONX_API_KEY:-None} - - WATSONX_ENDPOINT=${WATSONX_ENDPOINT:-None} - - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID:-None} - - OLLAMA_BASE_URL=${OLLAMA_ENDPOINT:-None} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} + - WATSONX_API_KEY=${WATSONX_API_KEY} + - WATSONX_URL=${WATSONX_URL:-${WATSONX_ENDPOINT}} + - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID} + - OLLAMA_BASE_URL=${OLLAMA_ENDPOINT} - LANGFLOW_LOAD_FLOWS_PATH=/app/flows - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY} - JWT=None @@ -159,8 +159,8 @@ services: - FILENAME=None - MIMETYPE=None - FILESIZE=0 - - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-} - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,DOCUMENT_ID,SOURCE_URL,ALLOWED_USERS,ALLOWED_GROUPS,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL,OPENSEARCH_INDEX_NAME + - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-text-embedding-3-small} + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,DOCUMENT_ID,SOURCE_URL,ALLOWED_USERS,ALLOWED_GROUPS,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_URL,WATSONX_PROJECT_ID,OLLAMA_BASE_URL,OPENSEARCH_INDEX_NAME - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_WORKERS=${LANGFLOW_WORKERS:-1} - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index c5c2fc375..f389252bd 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -1,122 +1,6 @@ { "data": { "edges": [ - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-F34VJ", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": 
{ - "fieldName": "dynamic_connector_type", - "id": "AdvancedDynamicFormBuilder-81Exw", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-F34VJ{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-F34VJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-F34VJ", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-F34VJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-b2cab", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_owner", - "id": "AdvancedDynamicFormBuilder-81Exw", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-b2cab{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b2cabœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-b2cab", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b2cabœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-ZVfuS", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_owner_email", - "id": "AdvancedDynamicFormBuilder-81Exw", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-ZVfuS{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-ZVfuSœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-ZVfuS", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-ZVfuSœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-Iqtxd", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_owner_name", - "id": "AdvancedDynamicFormBuilder-81Exw", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-Iqtxd{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-Iqtxdœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-Iqtxd", - "sourceHandle": 
"{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-Iqtxdœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -288,35 +172,6 @@ "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", "targetHandle": "{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SplitText", - "id": "SplitText-QIKhg", - "name": "dataframe", - "output_types": [ - "DataFrame" - ] - }, - "targetHandle": { - "fieldName": "ingest_data", - "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", - "inputTypes": [ - "Data", - "DataFrame" - ], - "type": "other" - } - }, - "id": "xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", - "selected": false, - "source": "SplitText-QIKhg", - "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", - "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", - "targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -406,15 +261,44 @@ "className": "", "data": { "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-b1QE5", + "dataType": "SplitText", + "id": "SplitText-QIKhg", + "name": "dataframe", + "output_types": [ + "DataFrame" + ] + }, + "targetHandle": { + "fieldName": "ingest_data", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "inputTypes": [ + "Data", + "DataFrame" + ], + "type": "other" + } + }, + "id": "xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", + "selected": false, + "source": "SplitText-QIKhg", + "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-CTKlr", "name": "text", "output_types": [ "Message" ] }, "targetHandle": { - "fieldName": "dynamic_document_id", + "fieldName": "dynamic_allowed_groups", "id": "AdvancedDynamicFormBuilder-81Exw", "inputTypes": [ "Text", @@ -423,27 +307,27 @@ "type": "str" } }, - "id": 
"xy-edge__SecretInput-b1QE5{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b1QE5œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_document_idœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "id": "xy-edge__TextInput-CTKlr{œdataTypeœ:œTextInputœ,œidœ:œTextInput-CTKlrœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_allowed_groupsœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "SecretInput-b1QE5", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b1QE5œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "source": "TextInput-CTKlr", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-CTKlrœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_document_idœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + "targetHandle": "{œfieldNameœ:œdynamic_allowed_groupsœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" }, { "animated": false, "className": "", "data": { "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-KxF3a", + "dataType": "TextInput", + "id": "TextInput-hlgVv", "name": "text", "output_types": [ "Message" ] }, "targetHandle": { - "fieldName": "dynamic_source_url", + "fieldName": "dynamic_allowed_users", "id": "AdvancedDynamicFormBuilder-81Exw", "inputTypes": [ "Text", @@ -452,26 +336,27 @@ "type": "str" } }, - "id": "xy-edge__SecretInput-KxF3a{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-KxF3aœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_source_urlœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "id": "xy-edge__TextInput-hlgVv{œdataTypeœ:œTextInputœ,œidœ:œTextInput-hlgVvœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_allowed_usersœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "SecretInput-KxF3a", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-KxF3aœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "source": "TextInput-hlgVv", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-hlgVvœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_source_urlœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + "targetHandle": "{œfieldNameœ:œdynamic_allowed_usersœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" }, { "animated": false, + "className": "", "data": { "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-utc4X", + "dataType": "TextInput", + "id": "TextInput-OGCeZ", "name": "text", "output_types": [ "Message" ] }, "targetHandle": { - "fieldName": "dynamic_allowed_users", + "fieldName": "dynamic_connector_type", "id": "AdvancedDynamicFormBuilder-81Exw", "inputTypes": [ "Text", @@ -480,26 +365,27 @@ "type": "str" } }, - "id": "xy-edge__SecretInput-utc4X{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-utc4Xœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_allowed_usersœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "id": 
"xy-edge__TextInput-OGCeZ{œdataTypeœ:œTextInputœ,œidœ:œTextInput-OGCeZœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "SecretInput-utc4X", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-utc4Xœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "source": "TextInput-OGCeZ", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-OGCeZœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_allowed_usersœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + "targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" }, { "animated": false, + "className": "", "data": { "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-SS8Hy", + "dataType": "TextInput", + "id": "TextInput-PI6at", "name": "text", "output_types": [ "Message" ] }, "targetHandle": { - "fieldName": "dynamic_allowed_groups", + "fieldName": "dynamic_document_id", "id": "AdvancedDynamicFormBuilder-81Exw", "inputTypes": [ "Text", @@ -508,69 +394,186 @@ "type": "str" } }, - "id": "xy-edge__SecretInput-SS8Hy{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-SS8Hyœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_allowed_groupsœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "id": "xy-edge__TextInput-PI6at{œdataTypeœ:œTextInputœ,œidœ:œTextInput-PI6atœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_document_idœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "SecretInput-SS8Hy", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-SS8Hyœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "source": "TextInput-PI6at", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-PI6atœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", "target": "AdvancedDynamicFormBuilder-81Exw", - "targetHandle": "{œfieldNameœ:œdynamic_allowed_groupsœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - } - ], - "nodes": [ + "targetHandle": "{œfieldNameœ:œdynamic_document_idœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, { + "animated": false, + "className": "", "data": { - "description": "Split text into chunks based on specified criteria.", - "display_name": "Split Text", - "id": "SplitText-QIKhg", - "node": { - "base_classes": [ - "DataFrame" + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-gRPNR", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner", + "id": "AdvancedDynamicFormBuilder-81Exw", + "inputTypes": [ + "Text", + "Message" ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Split text into chunks based on specified criteria.", - "display_name": "Split Text", - "documentation": "https://docs.langflow.org/components-processing#split-text", - "edited": true, - "field_order": [ - "data_inputs", - "chunk_overlap", - "chunk_size", - "separator", - "text_key", - "keep_separator" + "type": "str" + } + }, + "id": 
"xy-edge__TextInput-gRPNR{œdataTypeœ:œTextInputœ,œidœ:œTextInput-gRPNRœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-gRPNR", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-gRPNRœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-81Exw", + "targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-lTHSx", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner_email", + "id": "AdvancedDynamicFormBuilder-81Exw", + "inputTypes": [ + "Text", + "Message" ], - "frozen": false, - "icon": "scissors-line-dashed", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": { - "code_hash": "f2867efda61f", - "dependencies": { - "dependencies": [ - { - "name": "langchain_text_splitters", - "version": "0.3.9" - }, - { - "name": "lfx", - "version": null - } - ], - "total_dependencies": 2 - }, - "module": "custom_components.split_text" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, + "type": "str" + } + }, + "id": "xy-edge__TextInput-lTHSx{œdataTypeœ:œTextInputœ,œidœ:œTextInput-lTHSxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-lTHSx", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-lTHSxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-81Exw", + "targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-UZQ8v", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_source_url", + "id": "AdvancedDynamicFormBuilder-81Exw", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-UZQ8v{œdataTypeœ:œTextInputœ,œidœ:œTextInput-UZQ8vœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_source_urlœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-UZQ8v", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-UZQ8vœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-81Exw", + "targetHandle": "{œfieldNameœ:œdynamic_source_urlœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-68n9L", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner_name", + "id": "AdvancedDynamicFormBuilder-81Exw", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": 
"xy-edge__TextInput-68n9L{œdataTypeœ:œTextInputœ,œidœ:œTextInput-68n9Lœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-68n9L", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-68n9Lœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-81Exw", + "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + } + ], + "nodes": [ + { + "data": { + "description": "Split text into chunks based on specified criteria.", + "display_name": "Split Text", + "id": "SplitText-QIKhg", + "node": { + "base_classes": [ + "DataFrame" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Split text into chunks based on specified criteria.", + "display_name": "Split Text", + "documentation": "https://docs.langflow.org/split-text", + "edited": false, + "field_order": [ + "data_inputs", + "chunk_overlap", + "chunk_size", + "separator", + "text_key", + "keep_separator", + "clean_output" + ], + "frozen": false, + "icon": "scissors-line-dashed", + "legacy": false, + "metadata": { + "code_hash": "29ae597d2d86", + "dependencies": { + "dependencies": [ + { + "name": "langchain_text_splitters", + "version": "0.3.11" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 2 + }, + "module": "custom_components.split_text" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, "display_name": "Chunks", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "split_text", "name": "dataframe", "options": null, @@ -595,12 +598,14 @@ "list": false, "list_add_label": "Add More", "name": "chunk_overlap", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 200 }, @@ -613,15 +618,37 @@ "list": false, "list_add_label": "Add More", "name": "chunk_size", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1000 }, + "clean_output": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Clean Output", + "dynamic": false, + "info": "When enabled, only the text column is included in the output. 
Metadata columns are removed.", + "list": false, + "list_add_label": "Add More", + "name": "clean_output", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, "code": { "advanced": true, "dynamic": true, @@ -638,7 +665,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . 
for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n" + "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split 
in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_output\",\n display_name=\"Clean Output\",\n info=\"When enabled, only the text column is included in the output. Metadata columns are removed.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs, *, clean: bool = False) -> list[Data]:\n return [\n Data(text=doc.page_content) if clean else Data(text=doc.page_content, data=doc.metadata) for doc in docs\n ]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return 
splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n docs = self.split_text_base()\n df = DataFrame(self._docs_to_data(docs, clean=self.clean_output))\n return df if self.clean_output else df.smart_column_order()\n" }, "data_inputs": { "_input_type": "HandleInput", @@ -654,11 +681,13 @@ "list": false, "list_add_label": "Add More", "name": "data_inputs", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -679,6 +708,7 @@ "End" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -686,6 +716,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "False" }, @@ -702,6 +733,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "separator", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -709,6 +741,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "\n" }, @@ -725,6 +758,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "text_key", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -732,6 +766,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "text" } @@ -750,8 +785,8 @@ "width": 320 }, "position": { - "x": 1354.9418688156868, - "y": 1768.0815618919698 + "x": 1353.7989679128482, + "y": 1785.418981315661 }, "positionAbsolute": { "x": 1683.4543896546102, @@ -782,7 +817,7 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2026-02-11T08:29:36.987Z", + "last_updated": "2026-02-27T19:50:35.544Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": {}, @@ -832,7 +867,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "4998653d-b7f9-4104-a939-1bacf3693ad3" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "code": { @@ -872,6 +907,7 @@ "multiline": true, "name": "dynamic_allowed_groups", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -905,6 +941,7 @@ "multiline": true, "name": "dynamic_allowed_users", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -938,6 +975,7 @@ "multiline": true, "name": "dynamic_connector_type", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -971,6 +1009,7 @@ "multiline": true, "name": "dynamic_document_id", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1004,6 +1043,7 @@ "multiline": true, "name": "dynamic_owner", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1037,6 +1077,7 @@ "multiline": true, "name": "dynamic_owner_email", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1070,6 +1111,7 @@ "multiline": true, "name": "dynamic_owner_name", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ 
-1103,6 +1145,7 @@ "multiline": true, "name": "dynamic_source_url", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1233,61 +1276,98 @@ "tool_mode": false }, "selected_output": "form_data", - "showNode": false, + "showNode": true, "type": "AdvancedDynamicFormBuilder" }, "dragging": false, "id": "AdvancedDynamicFormBuilder-81Exw", "measured": { - "height": 48, - "width": 192 + "height": 881, + "width": 320 }, "position": { - "x": 1448.4009363327248, - "y": 1398.6976207235286 + "x": 1331.3793686192853, + "y": 868.8499669098999 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "SecretInput-F34VJ", + "description": "Uses Docling to process input documents connecting to your instance of Docling Serve.", + "display_name": "Docling Serve", + "id": "DoclingRemote-Dp3PX", "node": { "base_classes": [ - "Message" + "DataFrame" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, + "description": "Uses Docling to process input documents connecting to your instance of Docling Serve.", + "display_name": "Docling Serve", + "documentation": "https://docling-project.github.io/docling/", + "edited": false, "field_order": [ - "input_value" + "path", + "file_path", + "separator", + "silent_errors", + "delete_server_file_after_processing", + "ignore_unsupported_extensions", + "ignore_unspecified_files", + "api_url", + "max_concurrency", + "max_poll_timeout", + "api_headers", + "docling_serve_opts" ], "frozen": false, - "icon": "type", + "icon": "Docling", "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, + "metadata": { + "code_hash": "409d771a961e", + "dependencies": { + "dependencies": [ + { + "name": "httpx", + "version": "0.28.1" + }, + { + "name": "docling_core", + "version": "2.60.1" + }, + { + "name": "pydantic", + "version": "2.11.10" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 4 + }, + "module": "custom_components.docling_serve" + }, "minimized": false, "output_types": [], "outputs": [ { "allows_loop": false, "cache": true, - "display_name": "Output Text", + "display_name": "Files", "group_outputs": false, "hidden": null, - "method": "text_response", - "name": "text", + "loop_types": null, + "method": "load_files", + "name": "dataframe", "options": null, "required_inputs": null, - "selected": "Message", + "selected": "DataFrame", "tool_mode": true, "types": [ - "Message" + "DataFrame" ], "value": "__UNDEFINED__" } @@ -1295,6 +1375,51 @@ "pinned": false, "template": { "_type": "Component", + "api_headers": { + "_input_type": "NestedDictInput", + "advanced": true, + "display_name": "HTTP headers", + "dynamic": false, + "info": "Optional dictionary of additional headers required for connecting to Docling Serve.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "name": "api_headers", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "NestedDict", + "value": {} + }, + "api_url": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Server address", + "dynamic": false, + "info": "URL of the Docling Serve instance.", + "list": false, 
+ "list_add_label": "Add More", + "load_from_db": true, + "name": "api_url", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "DOCLING_SERVE_URL" + }, "code": { "advanced": true, "dynamic": true, @@ -1311,493 +1436,17 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "value": "import base64\nimport time\nfrom concurrent.futures import Future, ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any\n\nimport httpx\nfrom docling_core.types.doc import DoclingDocument\nfrom pydantic import ValidationError\n\nfrom lfx.base.data import BaseFileComponent\nfrom lfx.inputs import IntInput, NestedDictInput, StrInput\nfrom lfx.inputs.inputs import FloatInput\nfrom lfx.schema import Data\nfrom lfx.utils.util import transform_localhost_url\n\n\nclass DoclingRemoteComponent(BaseFileComponent):\n display_name = \"Docling Serve\"\n description = \"Uses Docling to process input documents connecting to your instance of Docling Serve.\"\n documentation = \"https://docling-project.github.io/docling/\"\n trace_type = \"tool\"\n icon = \"Docling\"\n name = \"DoclingRemote\"\n\n MAX_500_RETRIES = 5\n\n # https://docling-project.github.io/docling/usage/supported_formats/\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"jpg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n ]\n\n inputs = [\n *BaseFileComponent.get_base_inputs(),\n StrInput(\n name=\"api_url\",\n display_name=\"Server address\",\n info=\"URL of the Docling Serve instance.\",\n required=True,\n ),\n IntInput(\n name=\"max_concurrency\",\n display_name=\"Concurrency\",\n info=\"Maximum number of concurrent requests for the server.\",\n advanced=True,\n value=2,\n input_types=[\"Message\"],\n ),\n FloatInput(\n name=\"max_poll_timeout\",\n display_name=\"Maximum poll time\",\n info=\"Maximum waiting time for the document conversion to complete.\",\n advanced=True,\n value=3600,\n input_types=[\"Message\"],\n ),\n NestedDictInput(\n name=\"api_headers\",\n display_name=\"HTTP headers\",\n advanced=True,\n required=False,\n info=(\"Optional dictionary of additional headers required for connecting to Docling Serve.\"),\n input_types=[\"Message\"],\n ),\n NestedDictInput(\n name=\"docling_serve_opts\",\n display_name=\"Docling options\",\n advanced=True,\n required=False,\n info=(\n 
\"Optional dictionary of additional options. \"\n \"See https://github.com/docling-project/docling-serve/blob/main/docs/usage.md for more information.\"\n ),\n input_types=[\"Message\"],\n ),\n ]\n\n outputs = [\n *BaseFileComponent.get_base_outputs(),\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n # Transform localhost URLs to container-accessible hosts when running in a container\n transformed_url = transform_localhost_url(self.api_url)\n base_url = f\"{transformed_url}/v1\"\n\n def _convert_document(client: httpx.Client, file_path: Path, options: dict[str, Any]) -> Data | None:\n encoded_doc = base64.b64encode(file_path.read_bytes()).decode()\n payload = {\n \"options\": options,\n \"sources\": [{\"kind\": \"file\", \"base64_string\": encoded_doc, \"filename\": file_path.name}],\n }\n\n response = client.post(f\"{base_url}/convert/source/async\", json=payload)\n response.raise_for_status()\n task = response.json()\n\n http_failures = 0\n retry_status_start = 500\n retry_status_end = 600\n start_wait_time = time.monotonic()\n while task[\"task_status\"] not in (\"success\", \"failure\"):\n # Check if processing exceeds the maximum poll timeout\n processing_time = time.monotonic() - start_wait_time\n if processing_time >= self.max_poll_timeout:\n msg = (\n f\"Processing time {processing_time=} exceeds the maximum poll timeout {self.max_poll_timeout=}.\"\n \"Please increase the max_poll_timeout parameter or review why the processing \"\n \"takes long on the server.\"\n )\n self.log(msg)\n raise RuntimeError(msg)\n\n # Call for a new status update\n time.sleep(2)\n response = client.get(f\"{base_url}/status/poll/{task['task_id']}\")\n\n # Check if the status call gets into 5xx errors and retry\n if retry_status_start <= response.status_code < retry_status_end:\n http_failures += 1\n if http_failures > self.MAX_500_RETRIES:\n self.log(f\"The status requests got a http response {response.status_code} too many times.\")\n return None\n continue\n\n # Update task status\n task = response.json()\n\n result_resp = client.get(f\"{base_url}/result/{task['task_id']}\")\n result_resp.raise_for_status()\n result = result_resp.json()\n\n if \"json_content\" not in result[\"document\"] or result[\"document\"][\"json_content\"] is None:\n self.log(\"No JSON DoclingDocument found in the result.\")\n return None\n\n try:\n doc = DoclingDocument.model_validate(result[\"document\"][\"json_content\"])\n return Data(data={\"doc\": doc, \"file_path\": str(file_path)})\n except ValidationError as e:\n self.log(f\"Error validating the document. 
{e}\")\n return None\n\n docling_options = {\n \"to_formats\": [\"json\"],\n \"image_export_mode\": \"placeholder\",\n **(self.docling_serve_opts or {}),\n }\n\n processed_data: list[Data | None] = []\n with (\n httpx.Client(headers=self.api_headers) as client,\n ThreadPoolExecutor(max_workers=self.max_concurrency) as executor,\n ):\n futures: list[tuple[int, Future]] = []\n for i, file in enumerate(file_list):\n if file.path is None:\n processed_data.append(None)\n continue\n\n futures.append((i, executor.submit(_convert_document, client, file.path, docling_options)))\n\n for _index, future in futures:\n try:\n result_data = future.result()\n processed_data.append(result_data)\n except (httpx.HTTPStatusError, httpx.RequestError, KeyError, ValueError) as exc:\n self.log(f\"Docling remote processing failed: {exc}\")\n raise\n\n return self.rollup_data(file_list, processed_data)\n" }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", + "delete_server_file_after_processing": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Delete Server File After Processing", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "CONNECTOR_TYPE" - } - }, - "tool_mode": false - }, - "showNode": false, - "type": "SecretInput" - }, - "dragging": false, - "id": "SecretInput-F34VJ", - "measured": { - "height": 48, - "width": 192 - }, - "position": { - "x": 1086.7441155608867, - "y": 1181.5114079907114 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "SecretInput-b2cab", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n 
display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" - }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", - "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "OWNER" - } - }, - "tool_mode": false - }, - "showNode": false, - "type": "SecretInput" - }, - "dragging": false, - "id": "SecretInput-b2cab", - "measured": { - "height": 48, - "width": 192 - }, - "position": { - "x": 1084.8963360259204, - "y": 1464.903053731546 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "SecretInput-ZVfuS", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" - }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", - "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "OWNER_EMAIL" - } - }, - "tool_mode": false - }, - "showNode": false, - "type": "SecretInput" - }, - "dragging": 
false, - "id": "SecretInput-ZVfuS", - "measured": { - "height": 48, - "width": 192 - }, - "position": { - "x": 1080.009806633616, - "y": 1539.0114651846889 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "SecretInput-Iqtxd", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" - }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", - "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "OWNER_NAME" - } - }, - "tool_mode": false - }, - "showNode": false, - "type": "SecretInput" - }, - "dragging": false, - "id": "SecretInput-Iqtxd", - "measured": { - "height": 48, - "width": 192 - }, - "position": { - "x": 1080.0778610229986, - "y": 1695.3088801456563 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "DoclingRemote-Dp3PX", - "node": { - "base_classes": [ - "DataFrame" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Uses Docling to process input documents connecting to your instance of Docling Serve.", - "display_name": "Docling Serve", - "documentation": "https://docling-project.github.io/docling/", - "edited": true, - "field_order": [ - "path", - "file_path", - "separator", - "silent_errors", - "delete_server_file_after_processing", - "ignore_unsupported_extensions", - 
"ignore_unspecified_files", - "api_url", - "max_concurrency", - "max_poll_timeout", - "api_headers", - "docling_serve_opts" - ], - "frozen": false, - "icon": "Docling", - "legacy": false, - "metadata": { - "code_hash": "5723576d00e5", - "dependencies": { - "dependencies": [ - { - "name": "httpx", - "version": "0.28.1" - }, - { - "name": "docling_core", - "version": "2.49.0" - }, - { - "name": "pydantic", - "version": "2.11.10" - }, - { - "name": "lfx", - "version": "0.2.0.dev21" - } - ], - "total_dependencies": 4 - }, - "module": "custom_components.docling_serve" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Files", - "group_outputs": false, - "hidden": null, - "loop_types": null, - "method": "load_files", - "name": "dataframe", - "options": null, - "required_inputs": null, - "selected": "DataFrame", - "tool_mode": true, - "types": [ - "DataFrame" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "api_headers": { - "_input_type": "NestedDictInput", - "advanced": true, - "display_name": "HTTP headers", - "dynamic": false, - "info": "Optional dictionary of additional headers required for connecting to Docling Serve.", - "list": false, - "list_add_label": "Add More", - "name": "api_headers", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "NestedDict", - "value": {} - }, - "api_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Server address", - "dynamic": false, - "info": "URL of the Docling Serve instance.", + "info": "If true, the File Path will be deleted after processing, if it is hosted on the Server.", "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "api_url", - "override_skip": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "DOCLING_SERVE_URL" - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "import base64\nimport time\nfrom concurrent.futures import Future, ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any\n\nimport httpx\nfrom docling_core.types.doc import DoclingDocument\nfrom pydantic import ValidationError\n\nfrom lfx.base.data import BaseFileComponent\nfrom lfx.inputs import IntInput, NestedDictInput, StrInput\nfrom lfx.inputs.inputs import FloatInput\nfrom lfx.schema import Data\nfrom lfx.utils.util import transform_localhost_url\n\n\nclass DoclingRemoteComponent(BaseFileComponent):\n display_name = \"Docling Serve\"\n description = \"Uses Docling to process input documents connecting to your instance of Docling Serve.\"\n documentation = \"https://docling-project.github.io/docling/\"\n trace_type = \"tool\"\n icon = \"Docling\"\n name = \"DoclingRemote\"\n\n MAX_500_RETRIES = 5\n\n # https://docling-project.github.io/docling/usage/supported_formats/\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n 
\"htm\",\n \"html\",\n \"jpeg\",\n \"jpg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n ]\n\n inputs = [\n *BaseFileComponent.get_base_inputs(),\n StrInput(\n name=\"api_url\",\n display_name=\"Server address\",\n info=\"URL of the Docling Serve instance.\",\n required=True,\n ),\n IntInput(\n name=\"max_concurrency\",\n display_name=\"Concurrency\",\n info=\"Maximum number of concurrent requests for the server.\",\n advanced=True,\n value=2,\n ),\n FloatInput(\n name=\"max_poll_timeout\",\n display_name=\"Maximum poll time\",\n info=\"Maximum waiting time for the document conversion to complete.\",\n advanced=True,\n value=3600,\n ),\n NestedDictInput(\n name=\"api_headers\",\n display_name=\"HTTP headers\",\n advanced=True,\n required=False,\n info=(\"Optional dictionary of additional headers required for connecting to Docling Serve.\"),\n ),\n NestedDictInput(\n name=\"docling_serve_opts\",\n display_name=\"Docling options\",\n advanced=True,\n required=False,\n info=(\n \"Optional dictionary of additional options. \"\n \"See https://github.com/docling-project/docling-serve/blob/main/docs/usage.md for more information.\"\n ),\n ),\n ]\n\n outputs = [\n *BaseFileComponent.get_base_outputs(),\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n # Transform localhost URLs to container-accessible hosts when running in a container\n transformed_url = transform_localhost_url(self.api_url)\n base_url = f\"{transformed_url}/v1\"\n\n def _convert_document(client: httpx.Client, file_path: Path, options: dict[str, Any]) -> Data | None:\n encoded_doc = base64.b64encode(file_path.read_bytes()).decode()\n payload = {\n \"options\": options,\n \"sources\": [{\"kind\": \"file\", \"base64_string\": encoded_doc, \"filename\": file_path.name}],\n }\n\n response = client.post(f\"{base_url}/convert/source/async\", json=payload)\n response.raise_for_status()\n task = response.json()\n\n http_failures = 0\n retry_status_start = 500\n retry_status_end = 600\n start_wait_time = time.monotonic()\n while task[\"task_status\"] not in (\"success\", \"failure\"):\n # Check if processing exceeds the maximum poll timeout\n processing_time = time.monotonic() - start_wait_time\n if processing_time >= self.max_poll_timeout:\n msg = (\n f\"Processing time {processing_time=} exceeds the maximum poll timeout {self.max_poll_timeout=}.\"\n \"Please increase the max_poll_timeout parameter or review why the processing \"\n \"takes long on the server.\"\n )\n self.log(msg)\n raise RuntimeError(msg)\n\n # Call for a new status update\n time.sleep(2)\n response = client.get(f\"{base_url}/status/poll/{task['task_id']}\")\n\n # Check if the status call gets into 5xx errors and retry\n if retry_status_start <= response.status_code < retry_status_end:\n http_failures += 1\n if http_failures > self.MAX_500_RETRIES:\n self.log(f\"The status requests got a http response {response.status_code} too many times.\")\n return None\n continue\n\n # Update task status\n task = response.json()\n\n result_resp = client.get(f\"{base_url}/result/{task['task_id']}\")\n result_resp.raise_for_status()\n result = result_resp.json()\n\n if \"json_content\" not in result[\"document\"] or result[\"document\"][\"json_content\"] is None:\n self.log(\"No JSON DoclingDocument found in the result.\")\n return None\n\n try:\n doc = 
DoclingDocument.model_validate(result[\"document\"][\"json_content\"])\n return Data(data={\"doc\": doc, \"file_path\": str(file_path)})\n except ValidationError as e:\n self.log(f\"Error validating the document. {e}\")\n return None\n\n docling_options = {\n \"to_formats\": [\"json\"],\n \"image_export_mode\": \"placeholder\",\n **(self.docling_serve_opts or {}),\n }\n\n processed_data: list[Data | None] = []\n with (\n httpx.Client(headers=self.api_headers) as client,\n ThreadPoolExecutor(max_workers=self.max_concurrency) as executor,\n ):\n futures: list[tuple[int, Future]] = []\n for i, file in enumerate(file_list):\n if file.path is None:\n processed_data.append(None)\n continue\n\n futures.append((i, executor.submit(_convert_document, client, file.path, docling_options)))\n\n for _index, future in futures:\n try:\n result_data = future.result()\n processed_data.append(result_data)\n except (httpx.HTTPStatusError, httpx.RequestError, KeyError, ValueError) as exc:\n self.log(f\"Docling remote processing failed: {exc}\")\n raise\n\n return self.rollup_data(file_list, processed_data)\n" - }, - "delete_server_file_after_processing": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Delete Server File After Processing", - "dynamic": false, - "info": "If true, the Server File Path will be deleted after processing.", - "list": false, - "list_add_label": "Add More", "name": "delete_server_file_after_processing", "override_skip": false, "placeholder": "", @@ -1816,6 +1465,9 @@ "display_name": "Docling options", "dynamic": false, "info": "Optional dictionary of additional options. See https://github.com/docling-project/docling-serve/blob/main/docs/usage.md for more information.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -1835,7 +1487,7 @@ "do_picture_classification": false, "do_picture_description": false, "do_table_structure": true, - "ocr_engine": "easyocr", + "ocr_engine": "ocrmac", "picture_description_local": { "prompt": "Describe this image in a few sentences.", "repo_id": "HuggingFaceTB/SmolVLM-256M-Instruct" @@ -1845,9 +1497,9 @@ "file_path": { "_input_type": "HandleInput", "advanced": true, - "display_name": "Server File Path", + "display_name": "File Path", "dynamic": false, - "info": "Data object with a 'file_path' property pointing to server file or a Message object with a path to the file. Supercedes 'Path' but supports same file types.", + "info": "Data object (or list of Data objects) with a 'file_path' property pointing to a file or a Message object with a path to the file or list of paths to the file. The file can be hosted on the server or on Langflow's file system. 
Supercedes 'Path' but supports same file types.", "input_types": [ "Data", "Message" @@ -1911,6 +1563,9 @@ "display_name": "Concurrency", "dynamic": false, "info": "Maximum number of concurrent requests for the server.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "name": "max_concurrency", @@ -1931,6 +1586,9 @@ "display_name": "Maximum poll time", "dynamic": false, "info": "Maximum waiting time for the document conversion to complete.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "name": "max_poll_timeout", @@ -2054,18 +1712,20 @@ "dragging": false, "id": "DoclingRemote-Dp3PX", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": -24.21919265198389, - "y": 1766.4695572112516 + "x": -18.22506037537059, + "y": 1767.7398128168159 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Export DoclingDocument to markdown, html or other formats.", + "display_name": "Export DoclingDocument", "id": "ExportDoclingDocument-zZdRg", "node": { "base_classes": [ @@ -2089,25 +1749,23 @@ ], "frozen": false, "icon": "Docling", - "last_updated": "2025-10-04T01:42:10.290Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "4de16ddd37ac", + "code_hash": "32577a7e396b", "dependencies": { "dependencies": [ { "name": "docling_core", - "version": "2.48.4" + "version": "2.60.1" }, { "name": "lfx", - "version": "0.1.12.dev31" + "version": null } ], "total_dependencies": 2 }, - "module": "lfx.components.docling.export_docling_document.ExportDoclingDocumentComponent" + "module": "custom_components.export_doclingdocument" }, "minimized": false, "output_types": [], @@ -2117,6 +1775,7 @@ "cache": true, "display_name": "Exported data", "group_outputs": false, + "loop_types": null, "method": "export_document", "name": "data", "options": null, @@ -2133,6 +1792,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "as_dataframe", "name": "dataframe", "options": null, @@ -2164,7 +1824,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom docling_core.types.doc import ImageRefMode\n\nfrom lfx.base.data.docling_utils import extract_docling_documents\nfrom lfx.custom import Component\nfrom lfx.io import DropdownInput, HandleInput, MessageTextInput, Output, StrInput\nfrom lfx.schema import Data, DataFrame\n\n\nclass ExportDoclingDocumentComponent(Component):\n display_name: str = \"Export DoclingDocument\"\n description: str = \"Export DoclingDocument to markdown, html or other formats.\"\n documentation = \"https://docling-project.github.io/docling/\"\n icon = \"Docling\"\n name = \"ExportDoclingDocument\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data or DataFrame\",\n info=\"The data with documents to export.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n DropdownInput(\n name=\"export_format\",\n display_name=\"Export format\",\n options=[\"Markdown\", \"HTML\", \"Plaintext\", \"DocTags\"],\n info=\"Select the export format to convert the input.\",\n value=\"Markdown\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"image_mode\",\n display_name=\"Image export mode\",\n options=[\"placeholder\", \"embedded\"],\n info=(\n \"Specify how images are exported in the output. 
Placeholder will replace the images with a string, \"\n \"whereas Embedded will include them as base64 encoded images.\"\n ),\n value=\"placeholder\",\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"\",\n advanced=True,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder betweek pages in the markdown output.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Exported data\", name=\"data\", method=\"export_document\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"export_format\" and field_value == \"Markdown\":\n build_config[\"md_image_placeholder\"][\"show\"] = True\n build_config[\"md_page_break_placeholder\"][\"show\"] = True\n build_config[\"image_mode\"][\"show\"] = True\n elif field_name == \"export_format\" and field_value == \"HTML\":\n build_config[\"md_image_placeholder\"][\"show\"] = False\n build_config[\"md_page_break_placeholder\"][\"show\"] = False\n build_config[\"image_mode\"][\"show\"] = True\n elif field_name == \"export_format\" and field_value in {\"Plaintext\", \"DocTags\"}:\n build_config[\"md_image_placeholder\"][\"show\"] = False\n build_config[\"md_page_break_placeholder\"][\"show\"] = False\n build_config[\"image_mode\"][\"show\"] = False\n\n return build_config\n\n def export_document(self) -> list[Data]:\n documents = extract_docling_documents(self.data_inputs, self.doc_key)\n\n results: list[Data] = []\n try:\n image_mode = ImageRefMode(self.image_mode)\n for doc in documents:\n content = \"\"\n if self.export_format == \"Markdown\":\n content = doc.export_to_markdown(\n image_mode=image_mode,\n image_placeholder=self.md_image_placeholder,\n page_break_placeholder=self.md_page_break_placeholder,\n )\n elif self.export_format == \"HTML\":\n content = doc.export_to_html(image_mode=image_mode)\n elif self.export_format == \"Plaintext\":\n content = doc.export_to_text()\n elif self.export_format == \"DocTags\":\n content = doc.export_to_doctags()\n\n results.append(Data(text=content))\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n return results\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.export_document())\n" + "value": "from typing import Any\n\nfrom docling_core.types.doc import ImageRefMode\n\nfrom lfx.base.data.docling_utils import extract_docling_documents\nfrom lfx.custom import Component\nfrom lfx.io import DropdownInput, HandleInput, MessageTextInput, Output, StrInput\nfrom lfx.schema import Data, DataFrame\n\n\nclass ExportDoclingDocumentComponent(Component):\n display_name: str = \"Export DoclingDocument\"\n description: str = \"Export DoclingDocument to markdown, html or other formats.\"\n documentation = \"https://docling-project.github.io/docling/\"\n icon = \"Docling\"\n name = \"ExportDoclingDocument\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data or DataFrame\",\n info=\"The data with documents to export.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n DropdownInput(\n 
name=\"export_format\",\n display_name=\"Export format\",\n options=[\"Markdown\", \"HTML\", \"Plaintext\", \"DocTags\"],\n info=\"Select the export format to convert the input.\",\n value=\"Markdown\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"image_mode\",\n display_name=\"Image export mode\",\n options=[\"placeholder\", \"embedded\"],\n info=(\n \"Specify how images are exported in the output. Placeholder will replace the images with a string, \"\n \"whereas Embedded will include them as base64 encoded images.\"\n ),\n value=\"placeholder\",\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"\",\n advanced=True,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder betweek pages in the markdown output.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Exported data\", name=\"data\", method=\"export_document\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"export_format\" and field_value == \"Markdown\":\n build_config[\"md_image_placeholder\"][\"show\"] = True\n build_config[\"md_page_break_placeholder\"][\"show\"] = True\n build_config[\"image_mode\"][\"show\"] = True\n elif field_name == \"export_format\" and field_value == \"HTML\":\n build_config[\"md_image_placeholder\"][\"show\"] = False\n build_config[\"md_page_break_placeholder\"][\"show\"] = False\n build_config[\"image_mode\"][\"show\"] = True\n elif field_name == \"export_format\" and field_value in {\"Plaintext\", \"DocTags\"}:\n build_config[\"md_image_placeholder\"][\"show\"] = False\n build_config[\"md_page_break_placeholder\"][\"show\"] = False\n build_config[\"image_mode\"][\"show\"] = False\n\n return build_config\n\n def export_document(self) -> list[Data]:\n documents, warning = extract_docling_documents(self.data_inputs, self.doc_key)\n if warning:\n self.status = warning\n\n results: list[Data] = []\n try:\n image_mode = ImageRefMode(self.image_mode)\n for doc in documents:\n content = \"\"\n if self.export_format == \"Markdown\":\n content = doc.export_to_markdown(\n image_mode=image_mode,\n image_placeholder=self.md_image_placeholder,\n page_break_placeholder=self.md_page_break_placeholder,\n )\n elif self.export_format == \"HTML\":\n content = doc.export_to_html(image_mode=image_mode)\n elif self.export_format == \"Plaintext\":\n content = doc.export_to_text()\n elif self.export_format == \"DocTags\":\n content = doc.export_to_doctags()\n\n results.append(Data(text=content))\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n return results\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.export_document())\n" }, "data_inputs": { "_input_type": "HandleInput", @@ -2179,11 +1839,13 @@ "list": false, "list_add_label": "Add More", "name": "data_inputs", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -2200,6 +1862,7 @@ "list_add_label": "Add More", 
"load_from_db": false, "name": "doc_key", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2207,6 +1870,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "doc" }, @@ -2227,6 +1891,7 @@ "DocTags" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -2235,6 +1900,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "Markdown" }, @@ -2253,6 +1919,7 @@ "embedded" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2260,6 +1927,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "placeholder" }, @@ -2273,12 +1941,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "md_image_placeholder", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2292,12 +1962,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "md_page_break_placeholder", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -2311,12 +1983,12 @@ "dragging": false, "id": "ExportDoclingDocument-zZdRg", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 258.9758629312913, - "y": 1768.6446880643248 + "x": 240.80357530808504, + "y": 1768.6446880643246 }, "selected": false, "type": "genericNode" @@ -2762,7 +2434,7 @@ "dragging": false, "id": "DataFrameOperations-1BWXB", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { @@ -3213,7 +2885,7 @@ "dragging": false, "id": "DataFrameOperations-N80fC", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { @@ -3664,7 +3336,7 @@ "dragging": false, "id": "DataFrameOperations-9vMrp", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { @@ -3703,10 +3375,13 @@ }, { "data": { + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", "node": { "base_classes": [ - "Data" + "Data", + "VectorStore" ], "beta": false, "conditional_paths": [], @@ -3714,7 +3389,7 @@ "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. 
Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": true, + "edited": false, "field_order": [ "docs_metadata", "opensearch_url", @@ -3739,14 +3414,16 @@ "jwt_header", "bearer_prefix", "use_ssl", - "verify_certs" + "verify_certs", + "request_timeout", + "max_retries" ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2026-02-11T08:33:20.805Z", + "last_updated": "2026-02-27T18:36:17.049Z", "legacy": false, "metadata": { - "code_hash": "0a29b8fb1205", + "code_hash": "6a3df45b55c5", "dependencies": { "dependencies": [ { @@ -3755,7 +3432,7 @@ }, { "name": "lfx", - "version": "0.2.0.dev21" + "version": null }, { "name": "tenacity", @@ -3804,6 +3481,24 @@ "Data" ], "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Vector Store Connection", + "group_outputs": false, + "hidden": false, + "loop_types": null, + "method": "as_vector_store", + "name": "vectorstoreconnection", + "options": null, + "required_inputs": null, + "selected": "VectorStore", + "tool_mode": true, + "types": [ + "VectorStore" + ], + "value": "__UNDEFINED__" } ], "pinned": false, @@ -3812,28 +3507,200 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "4998653d-b7f9-4104-a939-1bacf3693ad3" + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, + "_type": "Component", + "auth_mode": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Authentication Mode", + "dynamic": false, + "external_options": {}, + "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "load_from_db": false, + "name": "auth_mode", + "options": [ + "basic", + "jwt" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jwt" + }, + "bearer_prefix": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Prefix 'Bearer '", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "bearer_prefix", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n 
MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\nREQUEST_TIMEOUT = 60\nMAX_RETRIES = 5\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. 
\"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n \"request_timeout\",\n \"max_retries\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"nmslib\", \"faiss\", \"lucene\", \"jvector\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'nmslib' works with standard \"\n \"OpenSearch. 'jvector' requires OpenSearch 2.9+. 'lucene' requires index.knn: true. \"\n \"Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. 
\"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # ----- Timeout / Retry -----\n StrInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout (seconds)\",\n value=\"60\",\n advanced=True,\n info=(\n \"Time in seconds to wait for a response from OpenSearch. \"\n \"Increase for large bulk ingestion or complex hybrid queries.\"\n ),\n ),\n StrInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n value=\"3\",\n advanced=True,\n info=\"Number of retries for failed connections before raising an error.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | dict | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n\n if raw_query is None or (isinstance(raw_query, str) and not raw_query.strip()):\n self.log(\"No query provided for raw search - returning empty results\")\n return Data(data={})\n\n if isinstance(raw_query, dict):\n query_body = raw_query\n elif isinstance(raw_query, str):\n s = raw_query.strip()\n\n # First, optimistically try to parse as JSON DSL\n try:\n query_body = json.loads(s)\n except json.JSONDecodeError:\n # Fallback: treat as a basic text query over common fields\n query_body = {\n \"query\": {\n \"multi_match\": {\n \"query\": s,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n }\n }\n }\n else:\n msg = f\"Unsupported raw_search query type: {type(raw_query)!r}\"\n raise TypeError(msg)\n\n client = self.build_client()\n logger.info(f\"query: {query_body}\")\n resp = client.search(\n index=self.index_name,\n body=query_body,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, 
\"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Note: Some OpenSearch versions/configurations have issues with dynamically adding\n knn_vector mappings (NullPointerException). This method checks if the field\n already exists before attempting to add it, and gracefully skips if the field\n is already properly configured.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n # First, check if the field already exists and is properly mapped\n properties = self._get_index_properties(client)\n if self._is_knn_vector_field(properties, field_name):\n # Field already exists as knn_vector - verify dimensions match\n existing_dim = self._get_field_dimension(properties, field_name)\n if existing_dim is not None and existing_dim != dim:\n logger.warning(\n f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n f\"but current embedding has dimension {dim}. 
Using existing mapping.\"\n )\n else:\n logger.info(\n f\"[OpenSearchMultimodel] Field '{field_name}' already exists \"\n f\"as knn_vector with matching dimensions - skipping mapping update\"\n )\n return\n\n # Field doesn't exist, try to add the mapping\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except RequestError as e:\n error_str = str(e).lower()\n if \"invalid engine\" in error_str and \"jvector\" in error_str:\n msg = (\n \"The 'jvector' engine is not available in your OpenSearch installation. \"\n \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to OpenSearch 2.9+.\"\n )\n raise ValueError(msg) from e\n if \"index.knn\" in error_str:\n msg = (\n \"The index has index.knn: false. Delete the existing index and let the \"\n \"component recreate it, or create a new index with a different name.\"\n )\n raise ValueError(msg) from e\n raise\n except Exception as e:\n # Check if this is the known OpenSearch k-NN NullPointerException issue\n error_str = str(e).lower()\n if \"null\" in error_str or \"nullpointerexception\" in error_str:\n logger.warning(\n f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name} \"\n f\"due to OpenSearch k-NN plugin issue: {e}. \"\n f\"This is a known issue with some OpenSearch versions. \"\n f\"Skipping mapping update. \"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. 
This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodel] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, 
max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- param helpers ----------\n def _parse_int_param(self, attr_name: str, default: int) -> int:\n \"\"\"Parse a string attribute to int, returning *default* on failure.\"\"\"\n raw = getattr(self, attr_name, None)\n if raw is None or str(raw).strip() == \"\":\n return default\n try:\n value = int(str(raw).strip())\n except ValueError:\n logger.warning(f\"Invalid integer value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n if value < 0:\n logger.warning(f\"Negative value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n return value\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n timeout=self._parse_int_param(\"request_timeout\", REQUEST_TIMEOUT),\n max_retries=self._parse_int_param(\"max_retries\", MAX_RETRIES),\n retry_on_timeout=True,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodel] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodel] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodel] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n 
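# (editor sketch; \"text-embedding-3-small\" is a hypothetical model name) Worked example of\n # the dynamic-field scheme this method implements:\n # normalize_model_name(\"text-embedding-3-small\") -> \"text_embedding_3_small\"\n # get_embedding_field_name(\"text-embedding-3-small\") -> \"chunk_embedding_text_embedding_3_small\"\n # Each indexed document is also tagged with embedding_model=\"text-embedding-3-small\", which\n # _detect_available_models aggregates on at search time.\n 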
logger.debug(\"[OpenSearchMultimodel][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodel][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodel][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n 
self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = 
[]\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n 
selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping (index.knn: true is required for vector search)\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error == \"resource_already_exists_exception\":\n pass # Index was created concurrently\n else:\n error_msg = str(creation_error).lower()\n if \"invalid engine\" in error_msg or \"illegal_argument\" in error_msg:\n if \"jvector\" in error_msg:\n msg = (\n \"The 'jvector' engine is not available in your OpenSearch installation. \"\n \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to 2.9+.\"\n )\n raise ValueError(msg) from creation_error\n if \"index.knn\" in error_msg:\n msg = (\n \"The index has index.knn: false. 
Delete the existing index and let the \"\n \"component recreate it, or create a new index with a different name.\"\n )\n raise ValueError(msg) from creation_error\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n raise\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, 
k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n def _get_filename_agg_field(self, index_properties: dict[str, Any] | None) -> str:\n \"\"\"Choose the appropriate field for filename aggregations.\"\"\"\n if not index_properties:\n return \"filename.keyword\"\n\n filename_def = index_properties.get(\"filename\")\n if not isinstance(filename_def, dict):\n return \"filename.keyword\"\n\n field_type = filename_def.get(\"type\")\n fields_def = filename_def.get(\"fields\", {})\n\n # Top-level keyword with no subfields\n if field_type == \"keyword\" and not isinstance(fields_def, dict):\n return \"filename\"\n\n # Text field with keyword subfield\n if isinstance(fields_def, dict) and \"keyword\" in fields_def:\n return \"filename.keyword\"\n\n # Fallback: aggregate on filename directly\n return \"filename\"\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Determine the best aggregation field for filename based on index mapping\n filename_agg_field = self._get_filename_agg_field(index_properties)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": filename_agg_field, \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n 
fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + }, + "docs_metadata": { + "_input_type": "TableInput", + "advanced": false, + "display_name": "Document Metadata", + "dynamic": false, + "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", + "input_types": [ + "Data" + ], + "is_list": true, + "list_add_label": "Add More", + "name": "docs_metadata", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "table_icon": "Table", + "table_schema": [ + { + "description": "Key name", + "display_name": "Key", + "formatter": "text", + "name": "key", + "type": "str" + }, + { + "description": "Value of the metadata", + "display_name": "Value", + "formatter": "text", + "name": "value", + "type": "str" + } + ], + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "trigger_icon": "Table", + "trigger_text": "Open table", + "type": "table", + "value": [] + }, + "ef_construction": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "EF Construction", + "dynamic": false, + "info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.", + "list": false, + "list_add_label": "Add More", + "name": "ef_construction", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 512 + }, + "embedding": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Embedding", + "dynamic": false, + "info": "", + "input_types": [ + "Embeddings" + ], + "list": true, + "list_add_label": "Add More", + "name": "embedding", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "embedding_model_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Embedding Model Name", + "dynamic": false, + "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. 
Error message will show all available identifiers.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "embedding_model_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "SELECTED_EMBEDDING_MODEL" }, - "_type": "Component", - "auth_mode": { + "engine": { "_input_type": "DropdownInput", - "advanced": false, + "advanced": true, "combobox": false, "dialog_inputs": {}, - "display_name": "Authentication Mode", + "display_name": "Vector Engine", "dynamic": false, "external_options": {}, - "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", - "load_from_db": false, - "name": "auth_mode", + "info": "Vector search engine for similarity calculations. 'nmslib' works with standard OpenSearch. 'jvector' requires OpenSearch 2.9+. 'lucene' requires index.knn: true. Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "name": "engine", "options": [ - "basic", - "jwt" + "nmslib", + "faiss", + "lucene", + "jvector" ], "options_metadata": [], "override_skip": false, "placeholder": "", - "real_time_refresh": true, "required": false, "show": true, "title_case": false, @@ -3842,17 +3709,131 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "jwt" + "value": "jvector" }, - "bearer_prefix": { - "_input_type": "BoolInput", + "filter_expression": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Search Filters (JSON)", + "dynamic": false, + "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "filter_expression", + "override_skip": false, + "password": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "index_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Index Name", + "dynamic": false, + "info": "The OpenSearch index name where documents will be stored and searched. 
Will be created automatically if it doesn't exist.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "index_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OPENSEARCH_INDEX_NAME" + }, + "ingest_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Ingest Data", + "dynamic": false, + "info": "", + "input_types": [ + "Data", + "DataFrame" + ], + "list": true, + "list_add_label": "Add More", + "name": "ingest_data", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "jwt_header": { + "_input_type": "StrInput", "advanced": true, - "display_name": "Prefix 'Bearer '", + "display_name": "JWT Header Name", "dynamic": false, "info": "", "list": false, "list_add_label": "Add More", - "name": "bearer_prefix", + "load_from_db": false, + "name": "jwt_header", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "Authorization" + }, + "jwt_token": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "JWT Token", + "dynamic": false, + "info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).", + "input_types": [], + "load_from_db": true, + "name": "jwt_token", + "override_skip": false, + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "JWT" + }, + "m": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "M Parameter", + "dynamic": false, + "info": "Number of bidirectional connections for each vector in the HNSW graph. 
Higher values improve search quality but increase memory usage and indexing time.", + "list": false, + "list_add_label": "Add More", + "name": "m", "override_skip": false, "placeholder": "", "required": false, @@ -3861,78 +3842,120 @@ "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "bool", - "value": true + "type": "int", + "value": 16 }, - "code": { + "max_retries": { + "_input_type": "StrInput", "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", + "display_name": "Max Retries", + "dynamic": false, + "info": "Number of retries for failed connections before raising an error.", "list": false, + "list_add_label": "Add More", "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, + "name": "max_retries", + "override_skip": false, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, - "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\n\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. \"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # DictInput(name=\"query\", display_name=\"Query\", input_types=[\"Data\"], is_list=False, tool_mode=True),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n if isinstance(raw_query, str):\n raw_query = json.loads(raw_query)\n client = self.build_client()\n logger.info(f\"query: {raw_query}\")\n resp = client.search(\n index=self.index_name,\n body=raw_query,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return 
str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Note: Some OpenSearch versions/configurations have issues with dynamically adding\n knn_vector mappings (NullPointerException). 
This method checks if the field\n already exists before attempting to add it, and gracefully skips if the field\n is already properly configured.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n # First, check if the field already exists and is properly mapped\n properties = self._get_index_properties(client)\n if self._is_knn_vector_field(properties, field_name):\n # Field already exists as knn_vector - verify dimensions match\n existing_dim = self._get_field_dimension(properties, field_name)\n if existing_dim is not None and existing_dim != dim:\n logger.warning(\n f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n f\"but current embedding has dimension {dim}. Using existing mapping.\"\n )\n else:\n logger.info(\n f\"[OpenSearchMultimodal] Field '{field_name}' already exists \"\n f\"as knn_vector with matching dimensions - skipping mapping update\"\n )\n return\n\n # Field doesn't exist, try to add the mapping\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n # Check if this is the known OpenSearch k-NN NullPointerException issue\n error_str = str(e).lower()\n if \"null\" in error_str or \"nullpointerexception\" in error_str:\n logger.warning(\n f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name} \"\n f\"due to OpenSearch k-NN plugin issue: {e}. \"\n f\"This is a known issue with some OpenSearch versions. \"\n f\"Skipping mapping update. \"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. 
This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodal] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, 
max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodal] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodal] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodal] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise 
ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodal][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodal][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = 
getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" 
values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for 
non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n 
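The _ensure_embedding_field_mapping call above (defined elsewhere in this component) reduces to an additive put_mapping; a hedged opensearch-py sketch using the same HNSW defaults as this patch:

from opensearchpy import OpenSearch

def ensure_embedding_field(client: OpenSearch, index_name: str, field_name: str, dim: int,
                           engine: str = "jvector", space_type: str = "l2",
                           ef_construction: int = 512, m: int = 16) -> None:
    # Adding a new knn_vector property is additive; re-running with an
    # identical definition is a no-op, so this is safe on every ingestion run.
    client.indices.put_mapping(
        index=index_name,
        body={"properties": {field_name: {
            "type": "knn_vector",
            "dimension": dim,
            "method": {"name": "hnsw", "engine": engine, "space_type": space_type,
                       "parameters": {"ef_construction": ef_construction, "m": m}},
        }}},
    )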
Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
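The Format B coercion is easy to verify standalone; a self-contained reduction of the mapping above:

FIELD_MAPPING = {"data_sources": "filename", "document_types": "mimetype", "owners": "owner"}

def context_to_clauses(ctx: dict) -> list[dict]:
    clauses = []
    for key, values in ctx.items():
        if not isinstance(values, list):
            continue
        field = FIELD_MAPPING.get(key, key)
        if not values:
            # Empty list means "match nothing", preserved via the impossible placeholder.
            clauses.append({"term": {field: "__IMPOSSIBLE_VALUE__"}})
        elif len(values) == 1:
            if values[0] != "__IMPOSSIBLE_VALUE__":
                clauses.append({"term": {field: values[0]}})
        else:
            clauses.append({"terms": {field: values}})
    return clauses

# context_to_clauses({"data_sources": ["a.pdf", "b.pdf"], "owners": ["user1"]})
# -> [{"terms": {"filename": ["a.pdf", "b.pdf"]}}, {"term": {"owner": "user1"}}]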
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
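Model auto-detection as implemented above reduces to a zero-size terms aggregation; a minimal opensearch-py sketch, with the fallback-to-current-model path omitted:

def detect_models(client, index_name: str, filter_clauses: list[dict] | None = None) -> list[str]:
    body = {"size": 0,
            "aggs": {"embedding_models": {"terms": {"field": "embedding_model", "size": 10}}}}
    if filter_clauses:
        # Scope detection to the documents the caller's filters actually match.
        body["query"] = {"bool": {"filter": filter_clauses}}
    result = client.search(index=index_name, body=body)
    buckets = result.get("aggregations", {}).get("embedding_models", {}).get("buckets", [])
    return [b["key"] for b in buckets if b["key"]]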
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
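Stripped of logging and conflict bookkeeping, the identifier map above amounts to a first-writer-wins dictionary:

def build_identifier_map(embeddings: list) -> dict:
    by_id: dict = {}
    for emb in embeddings:
        dep = getattr(emb, "deployment", None)
        mod = getattr(emb, "model", None)
        names = [dep, mod, getattr(emb, "model_id", None), getattr(emb, "model_name", None)]
        if dep and mod and dep != mod:
            # Combined id disambiguates duplicate deployments serving different models.
            names.append(f"{dep}:{mod}")
        for name in names:
            if name:
                by_id.setdefault(str(name), emb)
    return by_id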
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
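Minus the mapping and dimension checks, the per-model KNN construction looks like this (num_candidates <= 0 disables the parameter for clusters that reject it):

def build_knn_queries(query_embeddings: dict, field_for, k: int = 50,
                      num_candidates: int = 1000) -> list[dict]:
    queries = []
    for model_name, vector in query_embeddings.items():
        field = field_for(model_name)  # e.g. chunk_embedding_{model_name}
        knn: dict = {"vector": vector, "k": k}
        if num_candidates > 0:
            knn["num_candidates"] = num_candidates
        queries.append({"knn": {field: knn}})
    return queries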
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename.keyword\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n 
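The RequestError handling above degrades gracefully on older clusters; reduced to its essence (opensearch-py assumed, only the num_candidates branch shown):

import copy

from opensearchpy.exceptions import RequestError

def search_with_fallback(client, index: str, body: dict,
                         queries_without_candidates: list[dict]) -> dict:
    try:
        return client.search(index=index, body=body)
    except RequestError as e:
        if "num_candidates" not in str(e).lower():
            raise
        # Cluster rejected num_candidates: retry the identical dis_max without it.
        fallback = copy.deepcopy(body)
        fallback["query"]["bool"]["should"][0]["dis_max"]["queries"] = queries_without_candidates
        return client.search(index=index, body=fallback)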
}\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "3" }, - "docs_metadata": { - "_input_type": "TableInput", + "num_candidates": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Candidate Pool Size", + "dynamic": false, + "info": "Number of approximate neighbors to consider for each KNN query. Some OpenSearch deployments do not support this parameter; set to 0 to disable.", + "list": false, + "list_add_label": "Add More", + "name": "num_candidates", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "number_of_results": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Default Result Limit", + "dynamic": false, + "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", + "list": false, + "list_add_label": "Add More", + "name": "number_of_results", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 10 + }, + "opensearch_url": { + "_input_type": "StrInput", "advanced": false, - "display_name": "Document Metadata", + "display_name": "OpenSearch URL", "dynamic": false, - "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", - "input_types": [ - "Data" - ], - "is_list": true, + "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", + "list": false, "list_add_label": "Add More", - "name": "docs_metadata", + "load_from_db": true, + "name": "opensearch_url", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OPENSEARCH_URL" + }, + "password": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenSearch Password", + "dynamic": false, + "info": "", + "input_types": [], + "load_from_db": false, + "name": "password", "override_skip": false, + "password": true, "placeholder": "", "required": false, - "show": true, - "table_icon": "Table", - "table_schema": [ - { - "description": "Key name", - "display_name": "Key", - "formatter": "text", - "name": "key", - "type": "str" - }, - { - "description": "Value of the metadata", - "display_name": "Value", - "formatter": "text", - "name": "value", - "type": "str" - } - ], + "show": false, "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, "track_in_telemetry": false, - "trigger_icon": "Table", - "trigger_text": "Open table", - "type": "table", - "value": [] + "type": "str", + "value": "MyStrongOpenSearchPassword123!" }, - "ef_construction": { - "_input_type": "IntInput", + "request_timeout": { + "_input_type": "StrInput", "advanced": true, - "display_name": "EF Construction", + "display_name": "Request Timeout (seconds)", "dynamic": false, - "info": "Size of the dynamic candidate list during index construction. 
Higher values improve recall but increase indexing time and memory usage.", + "info": "Time in seconds to wait for a response from OpenSearch. Increase for large bulk ingestion or complex hybrid queries.", "list": false, "list_add_label": "Add More", - "name": "ef_construction", + "load_from_db": false, + "name": "request_timeout", "override_skip": false, "placeholder": "", "required": false, @@ -3940,42 +3963,44 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 512 + "track_in_telemetry": false, + "type": "str", + "value": "60" }, - "embedding": { - "_input_type": "HandleInput", + "search_query": { + "_input_type": "QueryInput", "advanced": false, - "display_name": "Embedding", + "display_name": "Search Query", "dynamic": false, - "info": "", + "info": "Enter a query to run a similarity search.", "input_types": [ - "Embeddings" + "Message" ], - "list": true, + "list": false, "list_add_label": "Add More", - "name": "embedding", + "load_from_db": false, + "name": "search_query", "override_skip": false, - "placeholder": "", + "placeholder": "Enter a query...", "required": false, "show": true, "title_case": false, + "tool_mode": true, + "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, - "type": "other", + "type": "query", "value": "" }, - "embedding_model_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Embedding Model Name", + "should_cache_vector_store": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Cache Vector Store", "dynamic": false, - "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. Error message will show all available identifiers.", + "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "embedding_model_name", + "name": "should_cache_vector_store", "override_skip": false, "placeholder": "", "required": false, @@ -3983,25 +4008,26 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "SELECTED_EMBEDDING_MODEL" + "track_in_telemetry": true, + "type": "bool", + "value": true }, - "engine": { + "space_type": { "_input_type": "DropdownInput", "advanced": true, "combobox": false, "dialog_inputs": {}, - "display_name": "Vector Engine", + "display_name": "Distance Metric", "dynamic": false, "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", - "name": "engine", + "info": "Distance metric for calculating vector similarity. 
'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", + "name": "space_type", "options": [ - "jvector", - "nmslib", - "faiss", - "lucene" + "l2", + "l1", + "cosinesimil", + "linf", + "innerproduct" ], "options_metadata": [], "override_skip": false, @@ -4014,46 +4040,59 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "jvector" + "value": "l2" }, - "filter_expression": { - "_input_type": "MultilineInput", + "use_ssl": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use SSL/TLS", + "dynamic": false, + "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", + "list": false, + "list_add_label": "Add More", + "name": "use_ssl", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "username": { + "_input_type": "StrInput", "advanced": false, - "ai_enabled": false, - "copy_field": false, - "display_name": "Search Filters (JSON)", + "display_name": "Username", "dynamic": false, - "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", - "input_types": [ - "Message" - ], + "info": "", "list": false, "list_add_label": "Add More", "load_from_db": false, - "multiline": true, - "name": "filter_expression", + "name": "username", "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, - "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "admin" }, - "index_name": { + "vector_field": { "_input_type": "StrInput", - "advanced": false, - "display_name": "Index Name", + "advanced": true, + "display_name": "Legacy Vector Field Name", "dynamic": false, - "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", + "info": "Legacy field name for backward compatibility. New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "index_name", + "load_from_db": false, + "name": "vector_field", "override_skip": false, "placeholder": "", "required": false, @@ -4063,101 +4102,217 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "OPENSEARCH_INDEX_NAME" + "value": "chunk_embedding" }, - "ingest_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Ingest Data", + "verify_certs": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verify SSL Certificates", "dynamic": false, - "info": "", - "input_types": [ - "Data", - "DataFrame" - ], - "list": true, + "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", + "list": false, "list_add_label": "Add More", - "name": "ingest_data", + "name": "verify_certs", "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, + "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "other", - "value": "" + "track_in_telemetry": true, + "type": "bool", + "value": false + } + }, + "tool_mode": false + }, + "selected_output": "search_results", + "showNode": true, + "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "dragging": false, + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "measured": { + "height": 967, + "width": 320 + }, + "position": { + "x": 1756.949537574781, + "y": 1525.6023642029047 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-EAo9i", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "model", + "api_key", + "api_base", + "base_url_ibm_watsonx", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2026-02-27T18:40:06.707Z", + "legacy": false, + "metadata": { + "code_hash": "c5ce0982da48", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_core", + "version": "0.3.83" + } + ], + "total_dependencies": 2 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "hidden": null, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, - "is_refresh": false, - "jwt_header": { - "_input_type": "StrInput", + "_frontend_node_folder_id": { + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", "advanced": true, - "display_name": "JWT Header Name", + "display_name": "API Base URL", "dynamic": false, - "info": "", + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, - "name": "jwt_header", + "name": "api_base", "override_skip": false, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "Authorization" + "value": "" }, - "jwt_token": { + "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "JWT Token", + "advanced": true, + "display_name": "API Key", "dynamic": false, - "info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).", + "info": "Model Provider API key", "input_types": [], "load_from_db": false, - "name": "jwt_token", + "name": "api_key", "override_skip": false, "password": true, "placeholder": "", - "required": true, + "real_time_refresh": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "jwt" + "value": "" }, - "m": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "M Parameter", + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", "dynamic": false, - "info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.", - "list": false, - "list_add_label": "Add More", - "name": "m", + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], "override_skip": false, "placeholder": "", + "real_time_refresh": true, "required": false, - "show": true, + "show": false, "title_case": false, + "toggle": false, "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "int", - "value": 16 + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" }, - "num_candidates": { + "chunk_size": { "_input_type": "IntInput", "advanced": true, - "display_name": "Candidate Pool Size", + "display_name": "Chunk Size", "dynamic": false, - "info": "Number of approximate neighbors to consider for each KNN query. 
Some OpenSearch deployments do not support this parameter; set to 0 to disable.", + "info": "", "list": false, "list_add_label": "Add More", - "name": "num_candidates", + "name": "chunk_size", "override_skip": false, "placeholder": "", "required": false, @@ -4169,36 +4324,34 @@ "type": "int", "value": 1000 }, - "number_of_results": { - "_input_type": "IntInput", + "code": { "advanced": true, - "display_name": "Default Result Limit", - "dynamic": false, - "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", "list": false, - "list_add_label": "Add More", - "name": "number_of_results", - "override_skip": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 10 + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API 
Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, - "opensearch_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "OpenSearch URL", + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", "dynamic": false, - "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "opensearch_url", + "load_from_db": false, + "name": "dimensions", "override_skip": false, "placeholder": "", "required": false, @@ -4206,63 +4359,40 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OPENSEARCH_URL" + "track_in_telemetry": true, + "type": "int", + "value": "" }, - "password": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenSearch Password", + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", "dynamic": false, "info": "", - "input_types": [], - "load_from_db": false, - "name": "password", - "override_skip": false, - "password": true, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "track_in_telemetry": false, - "type": "str", - "value": "MyStrongOpenSearchPassword123!" 
- }, - "search_query": { - "_input_type": "QueryInput", - "advanced": false, - "display_name": "Search Query", - "dynamic": false, - "info": "Enter a query to run a similarity search.", - "input_types": [ - "Message" - ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "search_query", + "name": "input_text", "override_skip": false, - "placeholder": "Enter a query...", + "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, - "tool_mode": true, - "trace_as_input": true, + "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "query", - "value": "" + "track_in_telemetry": true, + "type": "bool", + "value": true }, - "should_cache_vector_store": { - "_input_type": "BoolInput", + "is_refresh": true, + "max_retries": { + "_input_type": "IntInput", "advanced": true, - "display_name": "Cache Vector Store", + "display_name": "Max Retries", "dynamic": false, - "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", + "info": "", "list": false, "list_add_label": "Add More", - "name": "should_cache_vector_store", + "name": "max_retries", "override_skip": false, "placeholder": "", "required": false, @@ -4271,90 +4401,207 @@ "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "bool", - "value": true + "type": "int", + "value": 3 }, - "space_type": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Distance Metric", + "model": { + "_input_type": "ModelInput", + "advanced": false, + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Distance metric for calculating vector similarity. 
'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", - "name": "space_type", + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", + "name": "model", "options": [ - "l2", - "l1", - "cosinesimil", - "linf", - "innerproduct" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", - "required": false, + "placeholder": "Setup Provider", + "real_time_refresh": true, + "refresh_button": true, + "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "l2" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + } + ] }, - "use_ssl": { - "_input_type": "BoolInput", + "model_kwargs": { + 
"_input_type": "DictInput", "advanced": true, - "display_name": "Use SSL/TLS", + "display_name": "Model Kwargs", "dynamic": false, - "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", + "info": "Additional keyword arguments to pass to the model.", "list": false, "list_add_label": "Add More", - "name": "use_ssl", + "name": "model_kwargs", "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} }, - "username": { - "_input_type": "StrInput", + "project_id": { + "_input_type": "MessageTextInput", "advanced": false, - "display_name": "Username", + "display_name": "Project ID", "dynamic": false, - "info": "", + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "username", + "load_from_db": true, + "name": "project_id", "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "admin" + "value": "WATSONX_PROJECT_ID" }, - "vector_field": { - "_input_type": "StrInput", + "request_timeout": { + "_input_type": "FloatInput", "advanced": true, - "display_name": "Legacy Vector Field Name", + "display_name": "Request Timeout", "dynamic": false, - "info": "Legacy field name for backward compatibility. New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", + "info": "", "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "vector_field", + "name": "request_timeout", "override_skip": false, "placeholder": "", "required": false, @@ -4362,19 +4609,19 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "chunk_embedding" + "track_in_telemetry": true, + "type": "float", + "value": "" }, - "verify_certs": { + "show_progress_bar": { "_input_type": "BoolInput", "advanced": true, - "display_name": "Verify SSL Certificates", + "display_name": "Show Progress Bar", "dynamic": false, - "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", + "info": "", "list": false, "list_add_label": "Add More", - "name": "verify_certs", + "name": "show_progress_bar", "override_skip": false, "placeholder": "", "required": false, @@ -4385,30 +4632,51 @@ "track_in_telemetry": true, "type": "bool", "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 } }, "tool_mode": false }, - "selected_output": "search_results", - "showNode": false, - "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + "showNode": true, + "type": "EmbeddingModel" }, "dragging": false, - "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "id": "EmbeddingModel-EAo9i", "measured": { - "height": 48, - "width": 192 + "height": 207, + "width": 320 }, "position": { - "x": 1779.7037312968387, - "y": 1756.3949005266322 + "x": 1311.1440056916672, + "y": 1890.167434226358 }, - "selected": true, + "selected": false, "type": "genericNode" }, { "data": { - "id": "EmbeddingModel-EAo9i", + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-E0hvR", "node": { "base_classes": [ "Embeddings" @@ -4419,14 +4687,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -4435,48 +4701,26 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2026-02-11T07:52:18.314Z", + "last_updated": "2026-02-27T18:40:19.256Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -4508,13 +4752,13 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "4998653d-b7f9-4104-a939-1bacf3693ad3" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "OpenAI API Base URL", + "advanced": true, + "display_name": "API Base URL", "dynamic": false, "info": "Base URL for the API. 
Leave empty for default.", "input_types": [ @@ -4538,12 +4782,12 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, @@ -4554,7 +4798,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -4624,7 +4868,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -4634,6 +4878,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -4646,28 +4891,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -4688,7 +4911,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -4710,33 +4933,147 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "text-embedding-3-small" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, "show": 
true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "text-embedding-3-small" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -4758,32 +5095,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -4795,7 +5106,7 @@ ], "list": false, "list_add_label": "Add More", - "load_from_db": true, + "load_from_db": false, "name": "project_id", "override_skip": false, "placeholder": "", @@ -4807,50 +5118,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "WATSONX_PROJECT_ID" - }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "WatsonxAI" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "OpenAI" + "value": "" }, "request_timeout": { "_input_type": "FloatInput", @@ -4915,25 +5183,27 @@ }, "tool_mode": false }, - "showNode": false, + "showNode": true, "type": "EmbeddingModel" }, "dragging": false, - "id": "EmbeddingModel-EAo9i", + "id": "EmbeddingModel-E0hvR", "measured": { - "height": 48, - "width": 192 + "height": 207, + "width": 320 }, "position": { - "x": 1355.5927560779853, - "y": 1905.3991707633513 + "x": 1308.6253288695966, + "y": 2125.2916788143903 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "EmbeddingModel-E0hvR", + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-3LsIP", "node": { "base_classes": [ "Embeddings" @@ -4944,14 +5214,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", 
"documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -4960,48 +5228,26 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2026-02-11T07:52:18.315Z", + "last_updated": "2026-02-27T18:40:44.054Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -5033,7 +5279,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "4998653d-b7f9-4104-a939-1bacf3693ad3" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "api_base": { @@ -5052,7 +5298,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -5063,8 +5309,8 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "API Key (Optional)", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], @@ -5075,7 +5321,7 @@ "placeholder": "", "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "track_in_telemetry": false, "type": "str", @@ -5149,7 +5395,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = 
\"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -5159,6 +5405,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -5171,28 +5418,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "fail_safe_mode",
- "override_skip": false,
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "track_in_telemetry": true,
- "type": "bool",
- "value": true
- },
 "input_text": {
 "_input_type": "BoolInput",
 "advanced": true,
@@ -5213,7 +5438,7 @@
 "type": "bool",
 "value": true
 },
- "is_refresh": false,
+ "is_refresh": true,
 "max_retries": {
 "_input_type": "IntInput",
 "advanced": true,
@@ -5235,34 +5460,147 @@
 "value": 3
 },
 "model": {
- "_input_type": "DropdownInput",
+ "_input_type": "ModelInput",
 "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Model Name",
+ "display_name": "Embedding Model",
 "dynamic": false,
- "external_options": {},
- "info": "Select the embedding model to use",
- "load_from_db": false,
+ "external_options": {
+ "fields": {
+ "data": {
+ "node": {
+ "display_name": "Connect other models",
+ "icon": "CornerDownLeft",
+ "name": "connect_other_models"
+ }
+ }
+ }
+ },
+ "info": "Select your model provider",
+ "input_types": [
+ "Embeddings"
+ ],
+ "list": false,
+ "list_add_label": "Add More",
+ "model_type": "embedding",
 "name": "model",
 "options": [
- "all-minilm:latest",
- "nomic-embed-text:latest"
+ {
+ "category": "OpenAI",
+ "icon": "OpenAI",
+ "metadata": {
+ "embedding_class": "OpenAIEmbeddings",
+ "model_type": "embeddings",
+ "param_mapping": {
+ "api_base": "base_url",
+ "api_key": "api_key",
+ "chunk_size": "chunk_size",
+ "dimensions": "dimensions",
+ "max_retries": "max_retries",
+ "model": "model",
+ "model_kwargs": "model_kwargs",
+ "request_timeout": "timeout",
+ "show_progress_bar": "show_progress_bar"
+ }
+ },
+ "name": "text-embedding-3-small",
+ "provider": "OpenAI"
+ },
+ {
+ "category": "OpenAI",
+ "icon": "OpenAI",
+ "metadata": {
+ "embedding_class": "OpenAIEmbeddings",
+ "model_type": "embeddings",
+ "param_mapping": {
+ "api_base": "base_url",
+ "api_key": "api_key",
+ "chunk_size": "chunk_size",
+ "dimensions": "dimensions",
+ "max_retries": "max_retries",
+ "model": "model",
+ "model_kwargs": "model_kwargs",
+ "request_timeout": "timeout",
+ "show_progress_bar": "show_progress_bar"
+ }
+ },
+ "name": "text-embedding-3-large",
+ "provider": "OpenAI"
+ },
+ {
+ "category": "OpenAI",
+ "icon": "OpenAI",
+ "metadata": {
+ "embedding_class": "OpenAIEmbeddings",
+ "model_type": "embeddings",
+ "param_mapping": {
+ "api_base": "base_url",
+ "api_key": "api_key",
+ "chunk_size": "chunk_size",
+ "dimensions": "dimensions",
+ "max_retries": "max_retries",
+ "model": "model",
+ "model_kwargs": "model_kwargs",
+ "request_timeout": "timeout",
+ "show_progress_bar": "show_progress_bar"
+ }
+ },
+ "name": "text-embedding-ada-002",
+ "provider": "OpenAI"
+ },
+ {
+ "category": "Google Generative AI",
+ "icon": "GoogleGenerativeAI",
+ "metadata": {
+ "is_disabled_provider": true,
+ "variable_name": null
+ },
+ "name": "__enable_provider_Google Generative AI__",
+ "provider": "Google Generative AI"
+ },
+ {
+ "category": "IBM WatsonX",
+ "icon": "WatsonxAI",
+ "metadata": {
+ "is_disabled_provider": true,
+ "variable_name": null
+ },
+ "name": "__enable_provider_IBM WatsonX__",
+ "provider": "IBM WatsonX"
+ }
 ],
- "options_metadata": [],
 "override_skip": false,
- "placeholder": "",
+ "placeholder": "Setup Provider",
 "real_time_refresh": true,
 "refresh_button": true,
- "required": false,
+
"required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "all-minilm:latest" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -5284,32 +5622,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -5335,49 +5647,6 @@ "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "Ollama" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "Ollama" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -5421,115 +5690,229 @@ "truncate_input_tokens": { "_input_type": "IntInput", "advanced": true, - "display_name": "Truncate Input Tokens", + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-3LsIP", + "measured": { + "height": 207, + "width": 320 + }, + "position": { + "x": 1307.8810223474497, + "y": 2360.3146379131413 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TextInput-CTKlr", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text 
inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). 
Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" + }, + "input_value": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "multiline": false, + "name": "input_value", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "ALLOWED_GROUPS" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", "dynamic": false, - "info": "", + "info": "Enable to select from global variables (shows globe icon). Disables multiline editing.", "list": false, "list_add_label": "Add More", - "name": "truncate_input_tokens", + "name": "use_global_variable", "override_skip": false, "placeholder": "", + "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "int", - "value": 200 + "type": "bool", + "value": true } }, "tool_mode": false }, "showNode": false, - "type": "EmbeddingModel" + "type": "TextInput" }, "dragging": false, - "id": "EmbeddingModel-E0hvR", + "id": "TextInput-CTKlr", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 1361.1152821119326, - "y": 1983.3161387480468 + "x": 1028.9084907232098, + "y": 1062.849997200131 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "EmbeddingModel-3LsIP", + "id": "TextInput-hlgVv", "node": { "base_classes": [ - "Embeddings" + "Message" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Generate embeddings using a specified provider.", - "display_name": "Embedding Model", - "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", - "model", - "api_key", - "project_id", - "dimensions", - "chunk_size", - "request_timeout", - "max_retries", - "show_progress_bar", - "model_kwargs", - "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_value", + "use_global_variable" ], "frozen": false, - "icon": "binary", - "last_updated": "2026-02-11T07:52:18.316Z", + 
"icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "518f16485886", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" - }, - { - "name": "langchain_ibm", - "version": "0.3.19" + "version": null } ], - "total_dependencies": 7 + "total_dependencies": 1 }, - "module": "custom_components.embedding_model" + "module": "lfx.components.input_output.text.TextInputComponent" }, "minimized": false, "output_types": [], @@ -5537,18 +5920,17 @@ { "allows_loop": false, "cache": true, - "display_name": "Embedding Model", + "display_name": "Output Text", "group_outputs": false, - "hidden": null, "loop_types": null, - "method": "build_embeddings", - "name": "embeddings", + "method": "text_response", + "name": "text", "options": null, "required_inputs": null, - "selected": "Embeddings", + "selected": "Message", "tool_mode": true, "types": [ - "Embeddings" + "Message" ], "value": "__UNDEFINED__" } @@ -5559,106 +5941,9 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "4998653d-b7f9-4104-a939-1bacf3693ad3" + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" }, "_type": "Component", - "api_base": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "OpenAI API Base URL", - "dynamic": false, - "info": "Base URL for the API. Leave empty for default.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "api_base", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "api_key": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", - "dynamic": false, - "info": "Model Provider API key", - "input_types": [], - "load_from_db": true, - "name": "api_key", - "override_skip": false, - "password": true, - "placeholder": "", - "real_time_refresh": true, - "required": true, - "show": true, - "title_case": false, - "track_in_telemetry": false, - "type": "str", - "value": "OPENAI_API_KEY" - }, - "base_url_ibm_watsonx": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "watsonx API Endpoint", - "dynamic": false, - "external_options": {}, - "info": "The base URL of the API (IBM watsonx.ai only)", - "name": "base_url_ibm_watsonx", - "options": [ - "https://us-south.ml.cloud.ibm.com", - "https://eu-de.ml.cloud.ibm.com", - "https://eu-gb.ml.cloud.ibm.com", - "https://au-syd.ml.cloud.ibm.com", - "https://jp-tok.ml.cloud.ibm.com", - "https://ca-tor.ml.cloud.ibm.com" - ], - "options_metadata": [], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "https://us-south.ml.cloud.ibm.com" - }, - "chunk_size": { - "_input_type": "IntInput", - "advanced": 
true, - "display_name": "Chunk Size", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "chunk_size", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 1000 - }, "code": { "advanced": true, "dynamic": true, @@ -5675,316 +5960,387 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" - }, - "dimensions": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Dimensions", - "dynamic": false, - "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", - "list": false, - "list_add_label": "Add More", - "name": "dimensions", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": "" - }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": false - }, - "input_text": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Include the original text in the output", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "input_text", - "override_skip": false, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "is_refresh": false, - "max_retries": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Max Retries", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "max_retries", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 3 + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global 
Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, - "model": { - "_input_type": "DropdownInput", + "input_value": { + "_input_type": "MultilineInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "name": "model", - "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" + "info": "Text to be passed as input.", + "input_types": [ + "Message" ], - "options_metadata": [], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, "required": false, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, - "track_in_telemetry": true, + "track_in_telemetry": false, "type": "str", - "value": "text-embedding-3-small" + "value": "ALLOWED_USERS" }, - "model_kwargs": { - "_input_type": "DictInput", + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", "advanced": true, - "display_name": "Model Kwargs", + "display_name": "Use Global Variable", "dynamic": false, - "info": "Additional keyword arguments to pass to the model.", + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", "list": false, "list_add_label": "Add More", - "name": "model_kwargs", + "name": "use_global_variable", "override_skip": false, "placeholder": "", + "real_time_refresh": true, "required": false, "show": true, "title_case": false, "tool_mode": false, - "trace_as_input": true, - "track_in_telemetry": false, - "type": "dict", - "value": {} + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "TextInput" + }, + "dragging": false, + "id": "TextInput-hlgVv", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 1032.6873315933058, + "y": 1145.9844963422515 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TextInput-OGCeZ", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ "Message" ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", "list": false, - "list_add_label": "Add More", "load_from_db": false, - "name": "ollama_base_url", - "override_skip": false, + "multiline": true, + "name": "code", + "password": false, "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, + "required": true, + "show": true, "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global 
Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, - "project_id": { - "_input_type": "MessageTextInput", + "input_value": { + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Project ID", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "info": "Text to be passed as input.", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "project_id", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "CONNECTOR_TYPE" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, - "selected_metadata": { - "icon": "OpenAI" - }, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "str", - "value": "OpenAI" + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "TextInput" + }, + "dragging": false, + "id": "TextInput-OGCeZ", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 1034.5767520283541, + "y": 1227.2295750493238 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TextInput-PI6at", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 }, - "request_timeout": { - "_input_type": "FloatInput", + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, + "_type": "Component", + "code": { "advanced": true, - "display_name": "Request Timeout", - "dynamic": false, + "dynamic": true, + "fileTypes": [], + "file_path": "", "info": "", "list": false, - "list_add_label": "Add More", - "name": "request_timeout", - "override_skip": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "float", - "value": "" + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). 
Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, - "show_progress_bar": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Show Progress Bar", + "input_value": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "", + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", - "name": "show_progress_bar", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": false + "track_in_telemetry": false, + "type": "str", + "value": "DOCUMENT_ID" }, - "truncate_input_tokens": { - "_input_type": "IntInput", + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", "advanced": true, - "display_name": "Truncate Input Tokens", + "display_name": "Use Global Variable", "dynamic": false, - "info": "", + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", "list": false, "list_add_label": "Add More", - "name": "truncate_input_tokens", + "name": "use_global_variable", "override_skip": false, "placeholder": "", + "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "int", - "value": 200 + "type": "bool", + "value": true } }, "tool_mode": false }, "showNode": false, - "type": "EmbeddingModel" + "type": "TextInput" }, "dragging": false, - "id": "EmbeddingModel-3LsIP", + "id": "TextInput-PI6at", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 1360.2396840279503, - "y": 2067.099324740414 + "x": 1034.5767520283543, + "y": 1310.364074191445 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "SecretInput-b1QE5", + "id": "TextInput-gRPNR", "node": { "base_classes": [ "Message" @@ -5992,18 +6348,31 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, "field_order": [ - "input_value" + "input_value", + "use_global_variable" ], "frozen": false, "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, "minimized": false, "output_types": [], "outputs": [ @@ -6012,7 +6381,7 @@ "cache": true, "display_name": "Output Text", "group_outputs": false, - "hidden": null, + "loop_types": null, "method": "text_response", "name": "text", "options": null, @@ -6027,6 +6396,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, "_type": "Component", "code": { "advanced": true, @@ -6044,47 +6419,81 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = 
\"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, "input_value": { - "_input_type": "SecretStrInput", + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Secret", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", "load_from_db": true, + "multiline": false, "name": "input_value", + "override_skip": false, "password": true, "placeholder": "", "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", - "value": "DOCUMENT_ID" + "value": "OWNER" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true } }, "tool_mode": false }, "showNode": false, - "type": "SecretInput" + "type": "TextInput" }, "dragging": false, - "id": "SecretInput-b1QE5", + "id": "TextInput-gRPNR", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 1087.2923241423418, - "y": 1396.132629931692 + "x": 1034.5767520283541, + "y": 1393.498573333566 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "SecretInput-KxF3a", + "id": "TextInput-lTHSx", "node": { "base_classes": [ "Message" @@ -6092,18 +6501,31 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, "field_order": [ - "input_value" + "input_value", + "use_global_variable" ], "frozen": false, "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, "minimized": false, "output_types": [], "outputs": [ @@ -6112,7 +6534,7 @@ "cache": true, "display_name": "Output Text", "group_outputs": false, - "hidden": null, + "loop_types": null, "method": "text_response", "name": "text", "options": null, @@ -6127,6 +6549,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, "_type": "Component", "code": { "advanced": true, @@ -6144,47 +6572,81 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = 
\"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, "input_value": { - "_input_type": "SecretStrInput", + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Secret", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", "load_from_db": true, + "multiline": false, "name": "input_value", + "override_skip": false, "password": true, "placeholder": "", "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", - "value": "SOURCE_URL" + "value": "OWNER_EMAIL" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true } }, "tool_mode": false }, "showNode": false, - "type": "SecretInput" + "type": "TextInput" }, "dragging": false, - "id": "SecretInput-KxF3a", + "id": "TextInput-lTHSx", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 1079.2937650249683, - "y": 1622.3523150690187 + "x": 1032.6873315933058, + "y": 1476.633072475687 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "SecretInput-utc4X", + "id": "TextInput-68n9L", "node": { "base_classes": [ "Message" @@ -6192,18 +6654,31 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, "field_order": [ - "input_value" + "input_value", + "use_global_variable" ], "frozen": false, "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, "minimized": false, "output_types": [], "outputs": [ @@ -6212,7 +6687,7 @@ "cache": true, "display_name": "Output Text", "group_outputs": false, - "hidden": null, + "loop_types": null, "method": "text_response", "name": "text", "options": null, @@ -6227,6 +6702,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, "_type": "Component", "code": { "advanced": true, @@ -6244,47 +6725,81 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = 
\"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, "input_value": { - "_input_type": "SecretStrInput", + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Secret", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", "load_from_db": true, + "multiline": false, "name": "input_value", + "override_skip": false, "password": true, "placeholder": "", "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", - "value": "ALLOWED_USERS" + "value": "OWNER_NAME" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true } }, "tool_mode": false }, "showNode": false, - "type": "SecretInput" + "type": "TextInput" }, "dragging": false, - "id": "SecretInput-utc4X", + "id": "TextInput-68n9L", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 1088.4004572420636, - "y": 1262.6017782863833 + "x": 1032.687331593306, + "y": 1552.2098898776146 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "SecretInput-SS8Hy", + "id": "TextInput-UZQ8v", "node": { "base_classes": [ "Message" @@ -6292,18 +6807,31 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, "field_order": [ - "input_value" + "input_value", + "use_global_variable" ], "frozen": false, "icon": "type", + "last_updated": "2026-02-27T18:37:07.463Z", "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, "minimized": false, "output_types": [], "outputs": [ @@ -6312,7 +6840,7 @@ "cache": true, "display_name": "Output Text", "group_outputs": false, - "hidden": null, + "loop_types": null, "method": "text_response", "name": "text", "options": null, @@ -6327,6 +6855,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, "_type": "Component", "code": { "advanced": true, @@ -6344,57 +6878,91 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = 
\"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, "input_value": { - "_input_type": "SecretStrInput", + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Secret", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", "load_from_db": true, + "multiline": false, "name": "input_value", + "override_skip": false, "password": true, "placeholder": "", "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", - "value": "ALLOWED_GROUPS" + "value": "SOURCE_URL" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true } }, "tool_mode": false }, "showNode": false, - "type": "SecretInput" + "type": "TextInput" }, "dragging": false, - "id": "SecretInput-SS8Hy", + "id": "TextInput-UZQ8v", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 1086.8701684205084, - "y": 1329.4313687635786 + "x": 1034.5767520283541, + "y": 1639.123229889832 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": -947.6721564006871, - "y": -1169.8013406823577, - "zoom": 0.8841935394256344 + "x": 202.3992870671026, + "y": -396.95628496879374, + "zoom": 0.50751717990745 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, - "locked": true, "id": "5488df7c-b93f-4f87-a446-b67028bc0813", "is_component": false, - "last_tested_version": "1.7.0.dev21", + "last_tested_version": "1.8.0", + "locked": true, "name": "OpenSearch Ingestion Flow", "tags": [ "openai", @@ -6402,4 +6970,4 @@ "rag", "q-a" ] -} +} \ No newline at end of file diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index b2f41b1b6..320d6789b 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -171,34 +171,6 @@ "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "TextInput", - "id": "TextInput-aHsQb", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "filter_expression", - "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__TextInput-aHsQb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "TextInput-aHsQb", - "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", - "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -255,13 +227,41 @@ "target": "Agent-Nfw7u", "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "name": "component_as_tool", + "output_types": [ + "Tool" + ] + }, + "targetHandle": { + "fieldName": "tools", + "id": "Agent-Nfw7u", + "inputTypes": [ + "Tool" + ], + "type": "other" + } + }, + "id": 
"xy-edge__OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}", + "selected": false, + "source": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}", + "target": "Agent-Nfw7u", + "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}" + }, { "animated": false, "className": "", "data": { "sourceHandle": { "dataType": "TextInput", - "id": "TextInput-aHsQb", + "id": "TextInput-i0a3P", "name": "text", "output_types": [ "Message" @@ -276,10 +276,10 @@ "type": "str" } }, - "id": "xy-edge__TextInput-aHsQb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt Template-7kZsI{œfieldNameœ:œfilterœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "id": "xy-edge__TextInput-i0a3P{œdataTypeœ:œTextInputœ,œidœ:œTextInput-i0a3Pœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt Template-7kZsI{œfieldNameœ:œfilterœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "TextInput-aHsQb", - "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "source": "TextInput-i0a3P", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-i0a3Pœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", "target": "Prompt Template-7kZsI", "targetHandle": "{œfieldNameœ:œfilterœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" }, @@ -288,131 +288,31 @@ "className": "", "data": { "sourceHandle": { - "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", - "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", - "name": "component_as_tool", + "dataType": "TextInput", + "id": "TextInput-i0a3P", + "name": "text", "output_types": [ - "Tool" + "Message" ] }, "targetHandle": { - "fieldName": "tools", - "id": "Agent-Nfw7u", + "fieldName": "filter_expression", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", "inputTypes": [ - "Tool" + "Message" ], - "type": "other" + "type": "str" } }, - "id": "xy-edge__OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}", + "id": "xy-edge__TextInput-i0a3P{œdataTypeœ:œTextInputœ,œidœ:œTextInput-i0a3Pœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", - "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}", - "target": 
"Agent-Nfw7u", - "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}" + "source": "TextInput-i0a3P", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-i0a3Pœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" } ], "nodes": [ - { - "data": { - "id": "TextInput-aHsQb", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Get user text inputs.", - "display_name": "Text Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\nfrom langflow.io import SecretStrInput, Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n SecretStrInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" - }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Text", - "dynamic": false, - "info": "Text to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "OPENRAG-QUERY-FILTER" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "TextInput" - }, - "dragging": false, - "id": "TextInput-aHsQb", - "measured": { - "height": 204, - "width": 320 - }, - "position": { - "x": 499.6078970988059, - "y": 2370.091341967585 - }, - "selected": false, - "type": "genericNode" - }, { "data": { "id": "MCP-7EY21", @@ -437,7 +337,7 @@ "frozen": false, "icon": "Mcp", "key": "mcp_lf-starter_project", - "last_updated": "2025-12-02T21:33:13.267Z", + "last_updated": "2026-03-06T16:27:48.910Z", "legacy": false, "mcpServerName": "lf-starter_project", "metadata": { @@ -489,7 +389,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": 
"69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "code": { @@ -653,7 +553,7 @@ "dragging": false, "id": "MCP-7EY21", "measured": { - "height": 284, + "height": 287, "width": 320 }, "position": { @@ -979,7 +879,7 @@ "dragging": false, "id": "ChatInput-ci8VE", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { @@ -1019,7 +919,7 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "cae45e2d53f6", + "code_hash": "8c87e536cca4", "dependencies": { "dependencies": [ { @@ -1028,7 +928,7 @@ }, { "name": "fastapi", - "version": "0.120.0" + "version": "0.133.1" }, { "name": "lfx", @@ -1099,7 +999,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n 
clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n # Preserve existing session_id from the incoming message if it exists\n existing_session_id = message.session_id\n else:\n message = Message(text=text)\n existing_session_id = None\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n # Preserve session_id from incoming message, or use component/graph session_id\n message.session_id = (\n self.session_id or existing_session_id or (self.graph.session_id if hasattr(self, \"graph\") else None) or \"\"\n )\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or 
DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1281,7 +1181,7 @@ "dragging": false, "id": "ChatOutput-gWl8E", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { @@ -1293,6 +1193,8 @@ }, { "data": { + "description": "Define the agent's instructions, then enter a task to complete using tools.", + "display_name": "Agent", "id": "Agent-Nfw7u", "node": { "base_classes": [ @@ -1306,23 +1208,14 @@ "documentation": "https://docs.langflow.org/agents", "edited": false, "field_order": [ - "agent_llm", + "model", "api_key", - "base_url", + "base_url_ibm_watsonx", "project_id", - "max_output_tokens", - "max_tokens", - "model_kwargs", - "model_name", - "openai_api_base", - "api_key", - "temperature", - "seed", - "max_retries", - "timeout", "system_prompt", "context_id", "n_messages", + "max_tokens", "format_instructions", "output_schema", "tools", @@ -1335,23 +1228,23 @@ ], "frozen": false, "icon": "bot", - "last_updated": "2025-12-02T21:33:13.268Z", + "last_updated": "2026-03-06T16:29:04.384Z", "legacy": false, "metadata": { - "code_hash": "d64b11c24a1c", + "code_hash": "108da32d83f1", "dependencies": { "dependencies": [ - { - "name": "langchain_core", - "version": "0.3.79" - }, { "name": "pydantic", - "version": "2.10.6" + "version": "2.11.10" }, { "name": "lfx", - "version": "0.2.0.dev19" + "version": "0.3.0" + }, + { + "name": "langchain_core", + "version": "0.3.83" } ], "total_dependencies": 3 @@ -1366,6 +1259,7 @@ "cache": true, "display_name": "Response", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "message_response", "name": "response", @@ -1382,10 +1276,12 @@ "pinned": false, "template": { "_frontend_node_flow_id": { + "input_types": [], "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "input_types": [], + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "add_current_date_tool": { @@ -1398,6 +1294,7 @@ "list": false, "list_add_label": "Add More", "name": "add_current_date_tool", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1411,6 +1308,7 @@ "agent_description": { "_input_type": "MultilineInput", "advanced": true, + "ai_enabled": false, "copy_field": false, "display_name": "Agent Description [Deprecated]", "dynamic": false, @@ -1423,6 +1321,8 @@ "load_from_db": false, "multiline": true, "name": "agent_description", + "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -1434,86 +1334,58 @@ "type": "str", "value": "A helpful assistant with access to the following tools:" }, - "agent_llm": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", + "api_key": { + "_input_type": "SecretStrInput", + "advanced": true, + "display_name": "API Key", "dynamic": false, - "external_options": { - "fields": { - "data": { - 
"node": { - "display_name": "Connect other models", - "icon": "CornerDownLeft", - "name": "connect_other_models" - } - } - } - }, - "info": "The provider of the language model that the agent will use to generate responses.", + "info": "Falls back to OPENAI_API_KEY environment variable", "input_types": [], - "name": "agent_llm", - "options": [ - "Anthropic", - "Google Generative AI", - "OpenAI", - "IBM watsonx.ai", - "Ollama" - ], - "options_metadata": [ - { - "icon": "Anthropic" - }, - { - "icon": "GoogleGenerativeAI" - }, - { - "icon": "OpenAI" - }, - { - "icon": "WatsonxAI" - }, - { - "icon": "Ollama" - } - ], + "load_from_db": false, + "name": "api_key", + "override_skip": false, + "password": true, "placeholder": "", "real_time_refresh": true, - "refresh_button": false, "required": false, - "selected_metadata": { - "icon": "OpenAI" - }, "show": true, "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, + "track_in_telemetry": false, "type": "str", - "value": "OpenAI" + "value": "" }, - "api_key": { - "_input_type": "SecretStrInput", + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", "advanced": false, - "display_name": "OpenAI API Key", + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", "input_types": [], - "load_from_db": true, - "name": "api_key", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], "override_skip": false, - "password": true, "placeholder": "", "real_time_refresh": true, "required": false, - "show": true, + "show": false, "title_case": false, - "track_in_telemetry": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", - "value": "OPENAI_API_KEY" + "value": "https://us-south.ml.cloud.ibm.com" }, "code": { "advanced": true, @@ -1532,7 +1404,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool, Tool\nfrom pydantic import ValidationError\n\nfrom lfx.base.agents.agent import LCToolsAgentComponent\nfrom lfx.base.agents.events import ExceptionWithMessageError\nfrom lfx.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODEL_PROVIDERS_LIST,\n MODELS_METADATA,\n)\nfrom lfx.base.models.model_utils import get_model_name\nfrom lfx.components.helpers import CurrentDateComponent\nfrom lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom lfx.components.models_and_agents.memory import MemoryComponent\nfrom lfx.custom.custom_component.component import get_component_toolkit\nfrom lfx.custom.utils import update_component_build_config\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.inputs.inputs import BoolInput, SecretStrInput, StrInput\nfrom lfx.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TableInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom 
lfx.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n if \"OpenAI\" in MODEL_PROVIDERS_DICT:\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n else:\n openai_inputs_filtered = []\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n value=\"OpenAI\",\n real_time_refresh=True,\n refresh_button=False,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n }\n }\n },\n },\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API key to use for the model.\",\n required=True,\n ),\n StrInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"The base URL of the API.\",\n required=True,\n show=False,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"The project ID of the model.\",\n required=True,\n show=False,\n ),\n IntInput(\n name=\"max_output_tokens\",\n display_name=\"Max Output Tokens\",\n info=\"The maximum number of tokens to generate.\",\n show=False,\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. 
\"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. \"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent.get_base_inputs(),\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n llm_model, display_name = await self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. 
Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n await logger.adebug(f\"Retrieved {len(self.chat_history)} chat history messages\")\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Set shared callbacks for tracing the tools used by the agent\n self.set_tools_callbacks(self.tools, self._get_shared_callbacks())\n\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\n \"true\",\n \"1\",\n \"t\",\n \"y\",\n \"yes\",\n ]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n 
output_model = build_model_from_schema(processed_schema)\n\n # Validate against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n RuntimeError,\n ) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif 
hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n NotImplementedError,\n AttributeError,\n ) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(\n session_id=self.graph.session_id,\n context_id=self.context_id,\n order=\"Ascending\",\n n_messages=self.n_messages,\n )\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n async def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n if build_config is not None and field in build_config:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in 
build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n build_config[\"agent_llm\"][\"display_name\"] = \"Model Provider\"\n elif field_value == \"connect_other_models\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n real_time_refresh=True,\n refresh_button=False,\n input_types=[\"LanguageModel\"],\n placeholder=\"Awaiting model input.\",\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n },\n }\n },\n },\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = 
provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\",\n tool_description=description,\n # here we do not use the shared callbacks as we are exposing the agent as a tool\n callbacks=self.get_langchain_callbacks(),\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n\n return tools\n" + "value": "from __future__ import annotations\n\nimport json\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom pydantic import ValidationError\n\nfrom lfx.components.models_and_agents.memory import MemoryComponent\n\nif TYPE_CHECKING:\n from langchain_core.tools import Tool\n\nfrom lfx.base.agents.agent import LCToolsAgentComponent\nfrom lfx.base.agents.events import ExceptionWithMessageError\nfrom lfx.base.models.unified_models import (\n apply_provider_variable_config_to_build_config,\n get_language_model_options,\n get_llm,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.components.helpers import CurrentDateComponent\nfrom lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom lfx.custom.custom_component.component import get_component_toolkit\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, ModelInput, StrInput\nfrom lfx.io import IntInput, MessageTextInput, MultilineInput, Output, SecretStrInput, TableInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Language Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n DropdownInput(\n 
name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n info=\"The project ID associated with the foundation model (IBM watsonx.ai only)\",\n show=False,\n required=False,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n info=\"Maximum number of tokens to generate. Field name varies by provider.\",\n advanced=True,\n range_spec=RangeSpec(min=1, max=128000, step=1, step_type=\"int\"),\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. 
\"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent.get_base_inputs(),\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n from langchain_core.tools import StructuredTool\n\n max_tokens_val = getattr(self, \"max_tokens\", None)\n if max_tokens_val in {\"\", 0}:\n max_tokens_val = None\n llm_model = get_llm(\n model=self.model,\n user_id=self.user_id,\n api_key=self.api_key,\n max_tokens=max_tokens_val,\n watsonx_url=getattr(self, \"base_url_ibm_watsonx\", None),\n watsonx_project_id=getattr(self, \"project_id\", None),\n )\n if llm_model is None:\n msg = \"No language model selected. 
Please choose a model to proceed.\"\n raise ValueError(msg)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n await logger.adebug(f\"Retrieved {len(self.chat_history)} chat history messages\")\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Set shared callbacks for tracing the tools used by the agent\n self.set_tools_callbacks(self.tools, self._get_shared_callbacks())\n\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\n \"true\",\n \"1\",\n \"t\",\n \"y\",\n \"yes\",\n ]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n\n # Validate 
against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n RuntimeError,\n ) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = 
str(result)\n\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n NotImplementedError,\n AttributeError,\n ) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(\n session_id=self.graph.session_id,\n context_id=self.context_id,\n order=\"Ascending\",\n n_messages=self.n_messages,\n )\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self,\n build_config: dotdict,\n field_value: list[dict],\n field_name: str | None = None,\n ) -> dotdict:\n # Update model options with caching (for all field changes)\n # Agents require tool calling, so filter for only tool-calling capable models\n def get_tool_calling_model_options(user_id=None):\n return get_language_model_options(user_id=user_id, tool_calling=True)\n\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=dict(build_config),\n cache_key_prefix=\"language_model_options_tool_calling\",\n get_options_func=get_tool_calling_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n build_config = dotdict(build_config)\n\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n if field_name == \"model\":\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Show/hide provider-specific fields based on selected model\n # Get current model value - from field_value if model is being changed, otherwise from build_config\n current_model_value = field_value if field_name == \"model\" else build_config.get(\"model\", {}).get(\"value\")\n if isinstance(current_model_value, list) and len(current_model_value) > 0:\n selected_model = current_model_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Hide provider-specific fields by default before applying provider config\n for field in [\"base_url_ibm_watsonx\", \"project_id\"]:\n if field in build_config:\n build_config[field][\"show\"] = False\n build_config[field][\"required\"] = False\n\n # Apply provider variable configuration 
(advanced, required, info, env var fallback)\n if provider:\n build_config = apply_provider_variable_config_to_build_config(build_config, provider)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"model\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\",\n tool_description=description,\n # here we do not use the shared callbacks as we are exposing the agent as a tool\n callbacks=self.get_langchain_callbacks(),\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n\n return tools\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1547,6 +1419,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1561,6 +1434,7 @@ "format_instructions": { "_input_type": "MultilineInput", "advanced": true, + "ai_enabled": false, "copy_field": false, "display_name": "Output Format Instructions", "dynamic": false, @@ -1573,6 +1447,8 @@ "load_from_db": false, "multiline": true, "name": "format_instructions", + "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -1594,6 +1470,7 @@ "list": false, "list_add_label": "Add More", "name": "handle_parsing_errors", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1617,6 +1494,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "input_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1629,27 +1507,6 @@ "value": "" }, "is_refresh": false, - "json_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "JSON Mode", - "dynamic": false, - "info": "If True, it will output JSON regardless of passing a schema.", - "input_types": [], - "list": false, - "list_add_label": "Add More", - "name": "json_mode", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": false - }, "max_iterations": { "_input_type": "IntInput", "advanced": true, @@ -1660,26 +1517,6 @@ "list": false, "list_add_label": "Add More", "name": "max_iterations", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 15 - }, - "max_retries": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Max Retries", - "dynamic": false, - "info": "The maximum number of retries to make when generating.", - "input_types": [], - "list": false, - 
"list_add_label": "Add More", - "name": "max_retries", "override_skip": false, "placeholder": "", "required": false, @@ -1689,25 +1526,26 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "int", - "value": 5 + "value": 15 }, "max_tokens": { "_input_type": "IntInput", "advanced": true, "display_name": "Max Tokens", "dynamic": false, - "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", + "info": "Maximum number of tokens to generate. Field name varies by provider.", "input_types": [], "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "max_tokens", "override_skip": false, "placeholder": "", "range_spec": { "max": 128000, - "min": 0, - "step": 0.1, - "step_type": "float" + "min": 1, + "step": 1, + "step_type": "int" }, "required": false, "show": true, @@ -1718,73 +1556,275 @@ "type": "int", "value": "" }, - "model_kwargs": { - "_input_type": "DictInput", - "advanced": true, - "display_name": "Model Kwargs", + "model": { + "_input_type": "ModelInput", + "advanced": false, + "display_name": "Language Model", "dynamic": false, - "info": "Additional keyword arguments to pass to the model.", - "input_types": [], + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "LanguageModel" + ], "list": false, "list_add_label": "Add More", - "name": "model_kwargs", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "track_in_telemetry": false, - "type": "dict", - "value": {} - }, - "model_name": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": true, - "dialog_inputs": {}, - "display_name": "Model Name", - "dynamic": false, - "external_options": {}, - "info": "To see the model names, first choose a provider. 
Then, enter your API key and click the refresh button next to the model name.", - "input_types": [], - "name": "model_name", + "load_from_db": false, + "model_type": "language", + "name": "model", "options": [ - "gpt-4o-mini", - "gpt-4o", - "gpt-4.1", - "gpt-4.1-mini", - "gpt-4.1-nano", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-4", - "gpt-3.5-turbo", - "gpt-5.1", - "gpt-5", - "gpt-5-mini", - "gpt-5-nano", - "gpt-5-chat-latest", - "o1", - "o3-mini", - "o3", - "o3-pro", - "o4-mini", - "o4-mini-high" + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-opus-4-6", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-sonnet-4-6", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-haiku-4-5-20251001", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-opus-4-5-20251101", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-sonnet-4-5-20250929", + "provider": "Anthropic" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5.2" + ] + }, + "name": "gpt-5.2", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5.1" + ] + }, + "name": "gpt-5.1", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5" + ] + }, + "name": "gpt-5", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5-mini" + ] + }, + "name": "gpt-5-mini", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5-nano" + ] + }, + "name": "gpt-5-nano", + "provider": "OpenAI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "api_key_param": "apikey", + "context_length": 128000, + "model_class": "ChatWatsonx", + "model_name_param": "model_id", + "project_id_param": "project_id", + "url_param": "url" + }, + "name": "ibm/granite-3-2-8b-instruct", + "provider": "IBM WatsonX" + 
}, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "api_key_param": "apikey", + "context_length": 128000, + "model_class": "ChatWatsonx", + "model_name_param": "model_id", + "project_id_param": "project_id", + "url_param": "url" + }, + "name": "ibm/granite-3-3-8b-instruct", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "api_key_param": "apikey", + "context_length": 128000, + "model_class": "ChatWatsonx", + "model_name_param": "model_id", + "project_id_param": "project_id", + "url_param": "url" + }, + "name": "ibm/granite-3-8b-instruct", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "api_key_param": "apikey", + "context_length": 128000, + "model_class": "ChatWatsonx", + "model_name_param": "model_id", + "project_id_param": "project_id", + "url_param": "url" + }, + "name": "ibm/granite-4-h-small", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "api_key_param": "apikey", + "context_length": 128000, + "model_class": "ChatWatsonx", + "model_name_param": "model_id", + "project_id_param": "project_id", + "url_param": "url" + }, + "name": "ibm/granite-8b-code-instruct", + "provider": "IBM WatsonX" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", - "real_time_refresh": false, - "required": false, + "placeholder": "Setup Provider", + "real_time_refresh": true, + "refresh_button": true, + "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "gpt-4o-mini" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5.2" + ] + }, + "name": "gpt-5.2", + "provider": "OpenAI" + } + ] }, "n_messages": { "_input_type": "IntInput", @@ -1796,6 +1836,7 @@ "list": false, "list_add_label": "Add More", "name": "n_messages", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1806,28 +1847,6 @@ "type": "int", "value": 100 }, - "openai_api_base": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "OpenAI API Base", - "dynamic": false, - "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.", - "input_types": [], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "openai_api_base", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, "output_schema": { "_input_type": "TableInput", "advanced": true, @@ -1838,6 +1857,7 @@ "is_list": true, "list_add_label": "Add More", "name": "output_schema", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1896,30 +1916,32 @@ "type": "table", "value": [] }, - "seed": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Seed", + "project_id": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "watsonx Project ID", "dynamic": false, - "info": "The seed controls the reproducibility of the job.", + "info": "The project ID associated with the foundation model (IBM watsonx.ai only)", "input_types": [], "list": false, "list_add_label": "Add More", - "name": "seed", + "load_from_db": false, + "name": "project_id", "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 1 + "track_in_telemetry": false, + "type": "str", + "value": "" }, "system_prompt": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "Agent Instructions", "dynamic": false, @@ -1932,6 +1954,8 @@ "load_from_db": false, "multiline": true, "name": "system_prompt", + "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -1943,58 +1967,6 @@ "type": "str", "value": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** – Langflow is a powerful tool to build and deploy AI agents and MCP servers. [Read more](https://www.langflow.org/)\n**OpenSearch** – OpenSearch is an open source, search and observability suite that brings order to unstructured data at scale. [Read more](https://opensearch.org/)\n**Docling** – Docling simplifies document processing with advanced PDF understanding, OCR support, and seamless AI integrations. Parse PDFs, DOCX, PPTX, images & more. [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: )\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought." 
}, - "temperature": { - "_input_type": "SliderInput", - "advanced": true, - "display_name": "Temperature", - "dynamic": false, - "info": "", - "input_types": [], - "max_label": "", - "max_label_icon": "", - "min_label": "", - "min_label_icon": "", - "name": "temperature", - "override_skip": false, - "placeholder": "", - "range_spec": { - "max": 1, - "min": 0, - "step": 0.01, - "step_type": "float" - }, - "required": false, - "show": true, - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "title_case": false, - "tool_mode": false, - "track_in_telemetry": false, - "type": "slider", - "value": 0.1 - }, - "timeout": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Timeout", - "dynamic": false, - "info": "The timeout for requests to OpenAI completion API.", - "input_types": [], - "list": false, - "list_add_label": "Add More", - "name": "timeout", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 700 - }, "tools": { "_input_type": "HandleInput", "advanced": false, @@ -2007,6 +1979,7 @@ "list": true, "list_add_label": "Add More", "name": "tools", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2026,6 +1999,7 @@ "list": false, "list_add_label": "Add More", "name": "verbose", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2045,7 +2019,7 @@ "dragging": false, "id": "Agent-Nfw7u", "measured": { - "height": 594, + "height": 431, "width": 320 }, "position": { @@ -2076,7 +2050,7 @@ ], "frozen": false, "icon": "calculator", - "last_updated": "2025-12-02T21:33:13.268Z", + "last_updated": "2026-03-06T16:27:48.913Z", "legacy": false, "metadata": { "code_hash": "acbe2603b034", @@ -2119,7 +2093,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "code": { @@ -2216,7 +2190,7 @@ "dragging": false, "id": "CalculatorComponent-KrlMH", "measured": { - "height": 218, + "height": 221, "width": 320 }, "position": { @@ -2241,14 +2215,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -2257,48 +2229,26 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-02T21:33:13.269Z", + "last_updated": "2026-02-27T18:43:24.061Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": 
"langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -2330,13 +2280,13 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "OpenAI API Base URL", + "advanced": true, + "display_name": "API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", "input_types": [ @@ -2360,12 +2310,12 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, @@ -2376,7 +2326,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -2446,7 +2396,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -2456,6 +2406,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -2468,21 +2419,19 @@ "type": "int", "value": "" }, - "fail_safe_mode": { + "input_text": { "_input_type": "BoolInput", "advanced": true, - "display_name": "Fail-Safe Mode", + "display_name": "Include the original text in the output", "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", + "info": "", "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", + "name": "input_text", "override_skip": false, "placeholder": "", - "real_time_refresh": true, "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -2490,29 +2439,9 @@ "type": "bool", "value": true }, - "input_text": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Include the original text in the output", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "input_text", - "override_skip": false, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "is_refresh": false, - "max_retries": { - "_input_type": "IntInput", + "is_refresh": true, + "max_retries": { + "_input_type": "IntInput", "advanced": true, "display_name": "Max Retries", "dynamic": false, @@ -2532,32 +2461,228 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "text-embedding-3-small" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + 
"project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/granite-embedding-278m-multilingual", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/slate-125m-english-rtrvr-v2", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/slate-30m-english-rtrvr-v2", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "intfloat/multilingual-e5-large", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "sentence-transformers/all-minilm-l6-v2", + "provider": "IBM WatsonX" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "text-embedding-3-small" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -2579,32 +2704,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -2630,48 +2729,6 @@ "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "OpenAI" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "OpenAI" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -2741,32 +2798,33 @@ "dragging": false, "id": "EmbeddingModel-aIP4U", "measured": { - "height": 451, + "height": 207, "width": 320 }, "position": { - "x": 500.6819779163044, - "y": 1881.18804656446 + "x": 485.72511430199586, + "y": 1766.776625779543 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", "node": { "base_classes": [ "Data", - "DataFrame", "VectorStore" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. 
Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": true, + "edited": false, "field_order": [ "docs_metadata", "opensearch_url", @@ -2791,14 +2849,16 @@ "jwt_header", "bearer_prefix", "use_ssl", - "verify_certs" + "verify_certs", + "request_timeout", + "max_retries" ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-12-02T21:33:13.271Z", + "last_updated": "2026-03-06T16:27:48.915Z", "legacy": false, "metadata": { - "code_hash": "db60433453a8", + "code_hash": "6a3df45b55c5", "dependencies": { "dependencies": [ { @@ -2807,10 +2867,14 @@ }, { "name": "lfx", - "version": "0.2.0.dev21" + "version": null + }, + { + "name": "tenacity", + "version": "8.5.0" } ], - "total_dependencies": 2 + "total_dependencies": 3 }, "module": "custom_components.opensearch_multimodel_multiembedding" }, @@ -2842,7 +2906,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "auth_mode": { @@ -2910,7 +2974,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\n\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, 
combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. \"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # DictInput(name=\"query\", display_name=\"Query\", input_types=[\"Data\"], is_list=False, tool_mode=True),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n if isinstance(raw_query, str):\n raw_query = json.loads(raw_query)\n client = self.build_client()\n logger.info(f\"query: {raw_query}\")\n resp = client.search(\n index=self.index_name,\n body=raw_query,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return 
str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Note: Some OpenSearch versions/configurations have issues with dynamically adding\n knn_vector mappings (NullPointerException). 
This method checks if the field\n already exists before attempting to add it, and gracefully skips if the field\n is already properly configured.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n # First, check if the field already exists and is properly mapped\n properties = self._get_index_properties(client)\n if self._is_knn_vector_field(properties, field_name):\n # Field already exists as knn_vector - verify dimensions match\n existing_dim = self._get_field_dimension(properties, field_name)\n if existing_dim is not None and existing_dim != dim:\n logger.warning(\n f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n f\"but current embedding has dimension {dim}. Using existing mapping.\"\n )\n else:\n logger.info(\n f\"[OpenSearchMultimodal] Field '{field_name}' already exists \"\n f\"as knn_vector with matching dimensions - skipping mapping update\"\n )\n return\n\n # Field doesn't exist, try to add the mapping\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n # Check if this is the known OpenSearch k-NN NullPointerException issue\n error_str = str(e).lower()\n if \"null\" in error_str or \"nullpointerexception\" in error_str:\n logger.warning(\n f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name} \"\n f\"due to OpenSearch k-NN plugin issue: {e}. \"\n f\"This is a known issue with some OpenSearch versions. \"\n f\"[OpenSearchMultimodal] Skipping mapping update. \"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. 
This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodal] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, 
max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodal] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodal] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodal] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise 
ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodal][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodal][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = 
getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" 
values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for 
non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n 
Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename.keyword\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n 
}\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\nREQUEST_TIMEOUT = 60\nMAX_RETRIES = 5\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. \"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n \"request_timeout\",\n \"max_retries\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"nmslib\", \"faiss\", \"lucene\", \"jvector\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'nmslib' works with standard \"\n \"OpenSearch. 'jvector' requires OpenSearch 2.9+. 
'lucene' requires index.knn: true. \"\n \"Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
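Search falls back to this field only when the dynamic KNN fields cannot be queried. 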
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # ----- Timeout / Retry -----\n StrInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout (seconds)\",\n value=\"60\",\n advanced=True,\n info=(\n \"Time in seconds to wait for a response from OpenSearch. 
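The timeout applies per attempt, so retries can extend the total wait. 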
\"\n \"Increase for large bulk ingestion or complex hybrid queries.\"\n ),\n ),\n StrInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n value=\"3\",\n advanced=True,\n info=\"Number of retries for failed connections before raising an error.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | dict | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n\n if raw_query is None or (isinstance(raw_query, str) and not raw_query.strip()):\n self.log(\"No query provided for raw search - returning empty results\")\n return Data(data={})\n\n if isinstance(raw_query, dict):\n query_body = raw_query\n elif isinstance(raw_query, str):\n s = raw_query.strip()\n\n # First, optimistically try to parse as JSON DSL\n try:\n query_body = json.loads(s)\n except json.JSONDecodeError:\n # Fallback: treat as a basic text query over common fields\n query_body = {\n \"query\": {\n \"multi_match\": {\n \"query\": s,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n }\n }\n }\n else:\n msg = f\"Unsupported raw_search query type: {type(raw_query)!r}\"\n raise TypeError(msg)\n\n client = self.build_client()\n logger.info(f\"query: {query_body}\")\n resp = client.search(\n index=self.index_name,\n body=query_body,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return 
str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n 
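# HNSW graph degree; mirrors the \"M Parameter\" input\n        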
m: int,\n    ) -> None:\n        \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n        This allows adding new embedding models without recreating the entire index.\n        Also ensures the embedding_model tracking field exists.\n\n        Note: Some OpenSearch versions/configurations have issues with dynamically adding\n        knn_vector mappings (NullPointerException). This method checks if the field\n        already exists before attempting to add it, and gracefully skips if the field\n        is already properly configured.\n\n        Args:\n            client: OpenSearch client instance\n            index_name: Target index name\n            field_name: Dynamic field name for this embedding model\n            dim: Vector dimensionality\n            engine: Vector search engine\n            space_type: Distance metric\n            ef_construction: Construction parameter\n            m: HNSW parameter\n        \"\"\"\n        # First, check if the field already exists and is properly mapped\n        properties = self._get_index_properties(client)\n        if self._is_knn_vector_field(properties, field_name):\n            # Field already exists as knn_vector - verify dimensions match\n            existing_dim = self._get_field_dimension(properties, field_name)\n            if existing_dim is not None and existing_dim != dim:\n                logger.warning(\n                    f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n                    f\"but current embedding has dimension {dim}. Using existing mapping.\"\n                )\n            else:\n                logger.info(\n                    f\"[OpenSearchMultimodel] Field '{field_name}' already exists \"\n                    f\"as knn_vector with matching dimensions - skipping mapping update\"\n                )\n            return\n\n        # Field doesn't exist, try to add the mapping\n        try:\n            mapping = {\n                \"properties\": {\n                    field_name: {\n                        \"type\": \"knn_vector\",\n                        \"dimension\": dim,\n                        \"method\": {\n                            \"name\": \"disk_ann\",\n                            \"space_type\": space_type,\n                            \"engine\": engine,\n                            \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n                        },\n                    },\n                    # Also ensure the embedding_model tracking field exists as keyword\n                    \"embedding_model\": {\"type\": \"keyword\"},\n                    \"embedding_dimensions\": {\"type\": \"integer\"},\n                }\n            }\n            client.indices.put_mapping(index=index_name, body=mapping)\n            logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n        except RequestError as e:\n            error_str = str(e).lower()\n            if \"invalid engine\" in error_str and \"jvector\" in error_str:\n                msg = (\n                    \"The 'jvector' engine is not available in your OpenSearch installation. \"\n                    \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to OpenSearch 2.9+.\"\n                )\n                raise ValueError(msg) from e\n            if \"index.knn\" in error_str:\n                msg = (\n                    \"The index has index.knn: false. Delete the existing index and let the \"\n                    \"component recreate it, or create a new index with a different name.\"\n                )\n                raise ValueError(msg) from e\n            raise\n        except Exception as e:\n            # Check if this is the known OpenSearch k-NN NullPointerException issue\n            error_str = str(e).lower()\n            if \"null\" in error_str or \"nullpointerexception\" in error_str:\n                logger.warning(\n                    f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name} \"\n                    f\"due to OpenSearch k-NN plugin issue: {e}. \"\n                    f\"This is a known issue with some OpenSearch versions. \"\n                    f\"Skipping mapping update. 
\"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodel] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- param helpers ----------\n def _parse_int_param(self, attr_name: str, default: int) -> int:\n \"\"\"Parse a string attribute to int, returning *default* on failure.\"\"\"\n raw = getattr(self, attr_name, None)\n if raw is None or str(raw).strip() == \"\":\n return default\n try:\n value = int(str(raw).strip())\n except ValueError:\n logger.warning(f\"Invalid integer value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n if value < 0:\n logger.warning(f\"Negative value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n return value\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return 
{\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n timeout=self._parse_int_param(\"request_timeout\", REQUEST_TIMEOUT),\n max_retries=self._parse_int_param(\"max_retries\", MAX_RETRIES),\n retry_on_timeout=True,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodel] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodel] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodel] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodel][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodel][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, 
dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic 
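(exponential backoff, with longer waits for 429s) 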
using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = 
len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping (index.knn: true is required for vector search)\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error == \"resource_already_exists_exception\":\n pass # Index was created concurrently\n else:\n error_msg = str(creation_error).lower()\n if \"invalid engine\" in error_msg or \"illegal_argument\" in error_msg:\n if \"jvector\" in error_msg:\n msg = (\n \"The 'jvector' engine is not available in your OpenSearch installation. \"\n \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to 2.9+.\"\n )\n raise ValueError(msg) from creation_error\n if \"index.knn\" in error_msg:\n msg = (\n \"The index has index.knn: false. Delete the existing index and let the \"\n \"component recreate it, or create a new index with a different name.\"\n )\n raise ValueError(msg) from creation_error\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n raise\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n 
{\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. 
\"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n def _get_filename_agg_field(self, index_properties: dict[str, Any] | None) -> str:\n \"\"\"Choose the appropriate field for filename aggregations.\"\"\"\n if not index_properties:\n return \"filename.keyword\"\n\n filename_def = index_properties.get(\"filename\")\n if not isinstance(filename_def, dict):\n return \"filename.keyword\"\n\n field_type = filename_def.get(\"type\")\n fields_def = filename_def.get(\"fields\", {})\n\n # Top-level keyword with no subfields\n if field_type == \"keyword\" and not isinstance(fields_def, dict):\n return \"filename\"\n\n # Text field with keyword subfield\n if isinstance(fields_def, dict) and \"keyword\" in fields_def:\n return \"filename.keyword\"\n\n # Fallback: aggregate on filename directly\n return \"filename\"\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a 
sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Determine the best aggregation field for filename based on index mapping\n filename_agg_field = self._get_filename_agg_field(index_properties)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": filename_agg_field, \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n 
fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -3025,13 +3089,13 @@ "display_name": "Vector Engine", "dynamic": false, "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "info": "Vector search engine for similarity calculations. 'nmslib' works with standard OpenSearch. 'jvector' requires OpenSearch 2.9+. 'lucene' requires index.knn: true. Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", "name": "engine", "options": [ - "jvector", "nmslib", "faiss", - "lucene" + "lucene", + "jvector" ], "options_metadata": [], "override_skip": false, @@ -3063,6 +3127,7 @@ "multiline": true, "name": "filter_expression", "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -3179,6 +3244,27 @@ "type": "int", "value": 16 }, + "max_retries": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "Number of retries for failed connections before raising an error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "3" + }, "num_candidates": { "_input_type": "IntInput", "advanced": true, @@ -3259,6 +3345,27 @@ "type": "str", "value": "" }, + "request_timeout": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Request Timeout (seconds)", + "dynamic": false, + "info": "Time in seconds to wait for a response from OpenSearch. Increase for large bulk ingestion or complex hybrid queries.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "60" + }, "search_query": { "_input_type": "QueryInput", "advanced": false, @@ -3364,8 +3471,8 @@ "type": "string" } }, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", - "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. 
Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", "display_name": "search_documents", "name": "search_documents", "readonly": false, @@ -3375,7 +3482,7 @@ ] }, { - "_uniqueId": "as_dataframe_as_dataframe_1", + "_uniqueId": "raw_search_raw_search_1", "args": { "search_query": { "default": "", @@ -3384,14 +3491,14 @@ "type": "string" } }, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", - "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", - "display_name": "as_dataframe", - "name": "as_dataframe", + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_name": "raw_search", + "name": "raw_search", "readonly": false, - "status": false, + "status": true, "tags": [ - "as_dataframe" + "raw_search" ] }, { @@ -3404,8 +3511,8 @@ "type": "string" } }, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", - "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. 
Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", "display_name": "as_vector_store", "name": "as_vector_store", "readonly": false, @@ -3508,14 +3615,14 @@ "dragging": false, "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", "measured": { - "height": 902, + "height": 965, "width": 320 }, "position": { - "x": 1010.0149063694566, - "y": 1404.0086597117045 + "x": 982.8593504384346, + "y": 1372.6753259451405 }, - "selected": false, + "selected": true, "type": "genericNode" }, { @@ -3533,14 +3640,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -3549,48 +3654,26 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-02T21:33:13.271Z", + "last_updated": "2026-02-27T18:43:22.547Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -3622,7 +3705,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "api_base": { @@ -3641,7 +3724,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -3652,23 +3735,23 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "API Key (Optional)", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -3738,7 +3821,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import 
OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. \"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n 
max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n 
project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == \"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n 
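# ModelInput delivers the selection as a list of model dicts; the first entry\n # is the active model whose provider drives the field visibility below.\n 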
selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or 
global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. \"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model 
dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n 
model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -3748,6 +3831,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -3760,28 +3844,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3802,7 +3864,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -3824,34 +3886,228 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, - "name": "model", - "options": [ - "bge-large:latest", - "qwen3-embedding:4b" - ], - "options_metadata": [], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", + "name": "model", + "options": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/granite-embedding-278m-multilingual", + "provider": 
"IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/slate-125m-english-rtrvr-v2", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/slate-30m-english-rtrvr-v2", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "intfloat/multilingual-e5-large", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "sentence-transformers/all-minilm-l6-v2", + "provider": "IBM WatsonX" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + } + ], + "override_skip": false, + "placeholder": "Setup Provider", + "real_time_refresh": true, + "refresh_button": true, + "required": true, + "show": true, + "title_case": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -3873,32 +4129,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -3924,49 +4154,6 @@ "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "Ollama" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "Ollama" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -4033,14 +4220,15 @@ "showNode": true, "type": "EmbeddingModel" }, + "dragging": false, "id": "EmbeddingModel-J6YgA", "measured": { - "height": 369, + "height": 207, "width": 320 }, "position": { - "x": 494.1867639968285, - "y": 1470.1965849152762 + "x": 488.5779401414627, + "y": 1530.0240393725105 }, "selected": false, "type": "genericNode" @@ -4060,14 +4248,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -4076,48 +4262,26 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-02T21:33:13.272Z", + "last_updated": "2026-02-27T18:43:20.779Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -4149,7 +4313,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "c2b04832-8cbf-473b-afd0-014e4e86b84b" }, "_type": "Component", "api_base": { @@ -4168,7 +4332,7 @@ "override_skip": false, "placeholder": "", "required": 
false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -4179,12 +4343,12 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "IBM watsonx.ai API Key", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, @@ -4195,7 +4359,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "WATSONX_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -4220,7 +4384,7 @@ "placeholder": "", "real_time_refresh": true, "required": false, - "show": true, + "show": false, "title_case": false, "toggle": false, "tool_mode": false, @@ -4265,7 +4429,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -4275,6 +4439,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -4287,28 +4452,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -4321,7 +4464,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -4329,7 +4472,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -4351,37 +4494,228 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "ibm/granite-embedding-278m-multilingual", - "ibm/slate-125m-english-rtrvr-v2", - "ibm/slate-30m-english-rtrvr-v2", - "intfloat/multilingual-e5-large", - "sentence-transformers/all-minilm-l6-v2" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + 
"url": "url" + } + }, + "name": "ibm/granite-embedding-278m-multilingual", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/slate-125m-english-rtrvr-v2", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "ibm/slate-30m-english-rtrvr-v2", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "intfloat/multilingual-e5-large", + "provider": "IBM WatsonX" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "embedding_class": "WatsonxEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_key": "apikey", + "model_id": "model_id", + "project_id": "project_id", + "request_timeout": "request_timeout", + "space_id": "space_id", + "url": "url" + } + }, + "name": "sentence-transformers/all-minilm-l6-v2", + "provider": "IBM WatsonX" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "ibm/granite-embedding-278m-multilingual" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -4403,32 +4737,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -4445,7 +4753,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -4454,49 +4762,6 @@ "type": "str", "value": "WATSONX_PROJECT_ID" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "WatsonxAI" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "IBM watsonx.ai" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -4549,7 +4814,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -4566,18 +4831,20 @@ "dragging": false, "id": "EmbeddingModel-zLHKs", "measured": { - "height": 534, + "height": 207, "width": 320 }, "position": { - "x": 484.3184528995537, - "y": 914.6994738821654 + "x": 486.18806085134224, + "y": 1292.3602801434572 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt Template", "id": "Prompt Template-7kZsI", "node": { "base_classes": [ @@ -4595,33 +4862,28 @@ "display_name": "Prompt Template", "documentation": "https://docs.langflow.org/components-prompts", "edited": false, - "error": null, "field_order": [ "template", + "use_double_brackets", "tool_placeholder" ], "frozen": false, - "full_path": null, - "icon": "braces", - "is_composition": null, - "is_input": null, - "is_output": null, + "icon": "prompts", "legacy": false, "metadata": { - "code_hash": "7382d03ce412", + "code_hash": "5b3e6730923e", "dependencies": { "dependencies": [ { "name": "lfx", - "version": "0.2.0.dev21" + "version": null } ], "total_dependencies": 1 }, - "module": "lfx.components.models_and_agents.prompt.PromptComponent" + "module": "custom_components.prompt_template" }, "minimized": false, - "name": "", "output_types": [], "outputs": [ { @@ -4644,8 +4906,6 @@ } ], "pinned": false, - "priority": 0, - "replacement": null, "template": { "_type": "Component", "code": { @@ -4664,7 +4924,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.prompts.api_utils import process_prompt_template\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.inputs import DefaultPromptField\nfrom lfx.io import MessageTextInput, 
Output, PromptInput\nfrom lfx.schema.message import Message\nfrom lfx.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" + "value": "from typing import Any\n\nfrom lfx.base.prompts.api_utils import process_prompt_template\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.input_mixin import FieldTypes\nfrom lfx.inputs.inputs import DefaultPromptField\nfrom lfx.io import BoolInput, MessageTextInput, Output, PromptInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.template.utils import update_template_values\nfrom lfx.utils.mustache_security import validate_mustache_template\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n BoolInput(\n name=\"use_double_brackets\",\n display_name=\"Use Double Brackets\",\n value=False,\n advanced=True,\n info=\"Use {{variable}} syntax instead of {variable}.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool 
Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n \"\"\"Update the template field type based on the selected mode.\"\"\"\n if field_name == \"use_double_brackets\":\n # Change the template field type based on mode\n is_mustache = field_value is True\n if is_mustache:\n build_config[\"template\"][\"type\"] = FieldTypes.MUSTACHE_PROMPT.value\n else:\n build_config[\"template\"][\"type\"] = FieldTypes.PROMPT.value\n\n # Re-process the template to update variables when mode changes\n template_value = build_config.get(\"template\", {}).get(\"value\", \"\")\n if template_value:\n # Ensure custom_fields is properly initialized\n if \"custom_fields\" not in build_config:\n build_config[\"custom_fields\"] = {}\n\n # Clean up fields from the OLD mode before processing with NEW mode\n # This ensures we don't keep fields with wrong syntax even if validation fails\n old_custom_fields = build_config[\"custom_fields\"].get(\"template\", [])\n for old_field in list(old_custom_fields):\n # Remove the field from custom_fields and template\n if old_field in old_custom_fields:\n old_custom_fields.remove(old_field)\n build_config.pop(old_field, None)\n\n # Try to process template with new mode to add new variables\n # If validation fails, at least we cleaned up old fields\n try:\n # Validate mustache templates for security\n if is_mustache:\n validate_mustache_template(template_value)\n\n # Re-process template with new mode to add new variables\n _ = process_prompt_template(\n template=template_value,\n name=\"template\",\n custom_fields=build_config[\"custom_fields\"],\n frontend_node_template=build_config,\n is_mustache=is_mustache,\n )\n except ValueError as e:\n # If validation fails, we still updated the mode and cleaned old fields\n # User will see error when they try to save\n logger.debug(f\"Template validation failed during mode switch: {e}\")\n return build_config\n\n async def build_prompt(self) -> Message:\n use_double_brackets = self.use_double_brackets if hasattr(self, \"use_double_brackets\") else False\n template_format = \"mustache\" if use_double_brackets else \"f-string\"\n prompt = await Message.from_template_and_variables(template_format=template_format, **self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n use_double_brackets = frontend_node[\"template\"].get(\"use_double_brackets\", {}).get(\"value\", False)\n is_mustache = use_double_brackets is True\n\n try:\n # Validate mustache templates for security\n if is_mustache:\n validate_mustache_template(prompt_template)\n\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n is_mustache=is_mustache,\n )\n except ValueError as e:\n # If validation fails, don't add variables but allow component to be created\n logger.debug(f\"Template validation failed in _update_template: {e}\")\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called 
after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n use_double_brackets = frontend_node[\"template\"].get(\"use_double_brackets\", {}).get(\"value\", False)\n is_mustache = use_double_brackets is True\n\n try:\n # Validate mustache templates for security\n if is_mustache:\n validate_mustache_template(template)\n\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n is_mustache=is_mustache,\n )\n except ValueError as e:\n # If validation fails, don't add variables but allow component to be updated\n logger.debug(f\"Template validation failed in update_frontend_node: {e}\")\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" }, "filter": { "advanced": false, @@ -4718,6 +4978,7 @@ "info": "", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "template", "override_skip": false, "placeholder": "", @@ -4754,6 +5015,27 @@ "track_in_telemetry": false, "type": "str", "value": "" + }, + "use_double_brackets": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Double Brackets", + "dynamic": false, + "info": "Use {{variable}} syntax instead of {variable}.", + "list": false, + "list_add_label": "Add More", + "name": "use_double_brackets", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false } }, "tool_mode": false @@ -4764,7 +5046,7 @@ "dragging": false, "id": "Prompt Template-7kZsI", "measured": { - "height": 435, + "height": 429, "width": 320 }, "position": { @@ -4773,23 +5055,176 @@ }, "selected": false, "type": "genericNode" + }, + { + "data": { + "id": "TextInput-i0a3P", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-02-27T18:42:16.883Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": 
"1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" + }, + "input_value": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": false, + "name": "input_value", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "TextInput" + }, + "dragging": false, + "id": "TextInput-i0a3P", + "measured": { + "height": 207, + "width": 320 + }, + "position": { + "x": 471.417983121568, + "y": 2170.275430066397 + }, + "selected": false, + "type": "genericNode" } ], "viewport": { - "x": 3.4342547318133256, - "y": -319.63689114007093, - "zoom": 0.4319912925345232 + "x": -37.54904796446385, + "y": -382.80845207416996, + "zoom": 0.4709758466489534 } }, "description": "OpenRAG OpenSearch Agent", "endpoint_name": null, "id": "1098eea1-6649-4e1d-aed1-b77249fb8dd0", "is_component": false, + "last_tested_version": "1.8.0", "locked": true, - "last_tested_version": "1.7.0.dev21", "name": "OpenRAG OpenSearch Agent Flow", "tags": [ "assistants", "agents" ] -} +} \ No newline at end of file diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 408afee33..7fbc7bc4e 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -1,34 +1,6 @@ { "data": { "edges": [ - { - "animated": false, - "className": "not-running", - "data": { - "sourceHandle": { - "dataType": "ParserComponent", - "id": "ParserComponent-tZs7s", - "name": "parsed_text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "docs", - "id": "Prompt Template-Wo6kR", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__ParserComponent-tZs7s{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-tZs7sœ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}-Prompt Template-Wo6kR{œfieldNameœ:œdocsœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "ParserComponent-tZs7s", - "sourceHandle": "{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-tZs7sœ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}", - "target": "Prompt Template-Wo6kR", - "targetHandle": "{œfieldNameœ:œdocsœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -202,18 +174,45 @@ { "animated": false, "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-BG2U3", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "filter_expression", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-BG2U3{œdataTypeœ:œTextInputœ,œidœ:œTextInput-BG2U3œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-BG2U3", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-BG2U3œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, "data": { "sourceHandle": { 
"dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", - "name": "dataframe", + "name": "search_results", "output_types": [ - "DataFrame" + "Data" ] }, "targetHandle": { "fieldName": "input_data", - "id": "ParserComponent-tZs7s", + "id": "ParserComponent-nLvsY", "inputTypes": [ "DataFrame", "Data" @@ -221,44 +220,103 @@ "type": "other" } }, - "id": "xy-edge__OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-ParserComponent-tZs7s{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}", + "id": "xy-edge__OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œsearch_resultsœ,œoutput_typesœ:[œDataœ]}-ParserComponent-nLvsY{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-nLvsYœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}", "selected": false, "source": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", - "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", - "target": "ParserComponent-tZs7s", - "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}" + "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œsearch_resultsœ,œoutput_typesœ:[œDataœ]}", + "target": "ParserComponent-nLvsY", + "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-nLvsYœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}" }, { "animated": false, "data": { "sourceHandle": { - "dataType": "TextInput", - "id": "TextInput-4cEHx", - "name": "text", + "dataType": "ParserComponent", + "id": "ParserComponent-nLvsY", + "name": "parsed_text", "output_types": [ "Message" ] }, "targetHandle": { - "fieldName": "filter_expression", - "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "fieldName": "input_data", + "id": "TypeConverterComponent-AM0BE", + "inputTypes": [ + "Message", + "Data", + "DataFrame" + ], + "type": "other" + } + }, + "id": "xy-edge__ParserComponent-nLvsY{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-nLvsYœ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}-TypeConverterComponent-AM0BE{œfieldNameœ:œinput_dataœ,œidœ:œTypeConverterComponent-AM0BEœ,œinputTypesœ:[œMessageœ,œDataœ,œDataFrameœ],œtypeœ:œotherœ}", + "selected": false, + "source": "ParserComponent-nLvsY", + "sourceHandle": "{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-nLvsYœ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}", + "target": "TypeConverterComponent-AM0BE", + "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œTypeConverterComponent-AM0BEœ,œinputTypesœ:[œMessageœ,œDataœ,œDataFrameœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "TypeConverterComponent", + "id": "TypeConverterComponent-AM0BE", + "name": "dataframe_output", + "output_types": [ + "DataFrame" + ] + }, + "targetHandle": { + "fieldName": "input_data", + "id": "ParserComponent-x6BS7", + 
"inputTypes": [ + "DataFrame", + "Data" + ], + "type": "other" + } + }, + "id": "xy-edge__TypeConverterComponent-AM0BE{œdataTypeœ:œTypeConverterComponentœ,œidœ:œTypeConverterComponent-AM0BEœ,œnameœ:œdataframe_outputœ,œoutput_typesœ:[œDataFrameœ]}-ParserComponent-x6BS7{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-x6BS7œ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}", + "selected": false, + "source": "TypeConverterComponent-AM0BE", + "sourceHandle": "{œdataTypeœ:œTypeConverterComponentœ,œidœ:œTypeConverterComponent-AM0BEœ,œnameœ:œdataframe_outputœ,œoutput_typesœ:[œDataFrameœ]}", + "target": "ParserComponent-x6BS7", + "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-x6BS7œ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "ParserComponent", + "id": "ParserComponent-x6BS7", + "name": "parsed_text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "docs", + "id": "Prompt Template-Wo6kR", "inputTypes": [ "Message" ], "type": "str" } }, - "id": "xy-edge__TextInput-4cEHx{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "id": "xy-edge__ParserComponent-x6BS7{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-x6BS7œ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}-Prompt Template-Wo6kR{œfieldNameœ:œdocsœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", "selected": false, - "source": "TextInput-4cEHx", - "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", - "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + "source": "ParserComponent-x6BS7", + "sourceHandle": "{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-x6BS7œ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}", + "target": "Prompt Template-Wo6kR", + "targetHandle": "{œfieldNameœ:œdocsœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" } ], "nodes": [ { "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt Template", "id": "Prompt Template-Wo6kR", "node": { "base_classes": [ @@ -278,14 +336,14 @@ "edited": false, "field_order": [ "template", + "use_double_brackets", "tool_placeholder" ], "frozen": false, - "icon": "braces", + "icon": "prompts", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "7382d03ce412", + "code_hash": "5b3e6730923e", "dependencies": { "dependencies": [ { @@ -306,6 +364,7 @@ "display_name": "Prompt", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "build_prompt", "name": "prompt", "options": null, @@ -319,7 +378,6 @@ } ], "pinned": false, - "priority": 0, "template": { "_type": "Component", "code": { @@ -338,7 +396,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.prompts.api_utils import process_prompt_template\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.inputs import DefaultPromptField\nfrom lfx.io import MessageTextInput, Output, PromptInput\nfrom lfx.schema.message import Message\nfrom lfx.template.utils import 
update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" + "value": "from typing import Any\n\nfrom lfx.base.prompts.api_utils import process_prompt_template\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.input_mixin import FieldTypes\nfrom lfx.inputs.inputs import DefaultPromptField\nfrom lfx.io import BoolInput, MessageTextInput, Output, PromptInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.template.utils import update_template_values\nfrom lfx.utils.mustache_security import validate_mustache_template\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n BoolInput(\n name=\"use_double_brackets\",\n display_name=\"Use Double Brackets\",\n value=False,\n advanced=True,\n info=\"Use {{variable}} syntax instead of {variable}.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n 
),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n \"\"\"Update the template field type based on the selected mode.\"\"\"\n if field_name == \"use_double_brackets\":\n # Change the template field type based on mode\n is_mustache = field_value is True\n if is_mustache:\n build_config[\"template\"][\"type\"] = FieldTypes.MUSTACHE_PROMPT.value\n else:\n build_config[\"template\"][\"type\"] = FieldTypes.PROMPT.value\n\n # Re-process the template to update variables when mode changes\n template_value = build_config.get(\"template\", {}).get(\"value\", \"\")\n if template_value:\n # Ensure custom_fields is properly initialized\n if \"custom_fields\" not in build_config:\n build_config[\"custom_fields\"] = {}\n\n # Clean up fields from the OLD mode before processing with NEW mode\n # This ensures we don't keep fields with wrong syntax even if validation fails\n old_custom_fields = build_config[\"custom_fields\"].get(\"template\", [])\n for old_field in list(old_custom_fields):\n # Remove the field from custom_fields and template\n if old_field in old_custom_fields:\n old_custom_fields.remove(old_field)\n build_config.pop(old_field, None)\n\n # Try to process template with new mode to add new variables\n # If validation fails, at least we cleaned up old fields\n try:\n # Validate mustache templates for security\n if is_mustache:\n validate_mustache_template(template_value)\n\n # Re-process template with new mode to add new variables\n _ = process_prompt_template(\n template=template_value,\n name=\"template\",\n custom_fields=build_config[\"custom_fields\"],\n frontend_node_template=build_config,\n is_mustache=is_mustache,\n )\n except ValueError as e:\n # If validation fails, we still updated the mode and cleaned old fields\n # User will see error when they try to save\n logger.debug(f\"Template validation failed during mode switch: {e}\")\n return build_config\n\n async def build_prompt(self) -> Message:\n use_double_brackets = self.use_double_brackets if hasattr(self, \"use_double_brackets\") else False\n template_format = \"mustache\" if use_double_brackets else \"f-string\"\n prompt = await Message.from_template_and_variables(template_format=template_format, **self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n use_double_brackets = frontend_node[\"template\"].get(\"use_double_brackets\", {}).get(\"value\", False)\n is_mustache = use_double_brackets is True\n\n try:\n # Validate mustache templates for security\n if is_mustache:\n validate_mustache_template(prompt_template)\n\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n is_mustache=is_mustache,\n )\n except ValueError as e:\n # If validation fails, don't add variables but allow component to be created\n logger.debug(f\"Template validation failed in _update_template: {e}\")\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await 
super().update_frontend_node(new_frontend_node, current_frontend_node)\n        template = frontend_node[\"template\"][\"template\"][\"value\"]\n        use_double_brackets = frontend_node[\"template\"].get(\"use_double_brackets\", {}).get(\"value\", False)\n        is_mustache = use_double_brackets is True\n\n        try:\n            # Validate mustache templates for security\n            if is_mustache:\n                validate_mustache_template(template)\n\n            # Kept it duplicated for backwards compatibility\n            _ = process_prompt_template(\n                template=template,\n                name=\"template\",\n                custom_fields=frontend_node[\"custom_fields\"],\n                frontend_node_template=frontend_node[\"template\"],\n                is_mustache=is_mustache,\n            )\n        except ValueError as e:\n            # If validation fails, don't add variables but allow component to be updated\n            logger.debug(f\"Template validation failed in update_frontend_node: {e}\")\n        # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n        # and update the frontend_node with those values\n        update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n        return frontend_node\n\n    def _get_fallback_input(self, **kwargs):\n        return DefaultPromptField(**kwargs)\n"
         },
         "docs": {
           "advanced": false,
@@ -394,12 +452,14 @@
         "list_add_label": "Add More",
         "load_from_db": false,
         "name": "template",
+        "override_skip": false,
         "placeholder": "",
         "required": false,
         "show": true,
         "title_case": false,
         "tool_mode": false,
         "trace_as_input": true,
+        "track_in_telemetry": false,
         "type": "prompt",
         "value": "You are generating prompt nudges to help a user explore a corpus.\n\nTask:\n1) Skim the documents to infer common themes, entities, or tasks.\n2) Propose exactly three concise, distinct prompt nudges that encourage useful next queries.\n3) If chat history is provided, use it to generate new questions the user might have, based on the LLM's response to their previous query. DO NOT repeat user questions.\n4) Make the nudges concise, close to 40 characters.\n5) The nudges are questions or commands the user can send to the chatbot, which answers by looking at the corpus.\n6) Return strings only, separating the nudges by a newline. Don't include quotation marks.\n7) If any error occurs, return blank. This will be used in production, so don't ask for more information or for confirmation as if you were still talking to me. If, for some reason, you can't provide the nudges, your job has failed and you should just return blank.\nRules: Be brief. No duplicates. No explanations outside the strings of the nudges. 
English only.\n\nExamples:\n Show me this quarter's top 10 deals\n Summarize recent client interactions\n Search OpenSearch for mentions of our competitors\n\nChat history:\n{prompt}\n\nDocuments:\n{docs}\n\n" }, @@ -416,6 +476,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "tool_placeholder", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -423,8 +484,30 @@ "tool_mode": true, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" + }, + "use_double_brackets": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Double Brackets", + "dynamic": false, + "info": "Use {{variable}} syntax instead of {variable}.", + "list": false, + "list_add_label": "Add More", + "name": "use_double_brackets", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false } }, "tool_mode": false @@ -435,7 +518,7 @@ "dragging": false, "id": "Prompt Template-Wo6kR", "measured": { - "height": 449, + "height": 429, "width": 320 }, "position": { @@ -447,9 +530,9 @@ }, { "data": { - "description": "Extracts text using a template.", - "display_name": "Parser", - "id": "ParserComponent-tZs7s", + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", + "id": "ChatInput-7W1BE", "node": { "base_classes": [ "Message" @@ -457,22 +540,25 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Extracts text using a template.", - "display_name": "Parser", - "documentation": "https://docs.langflow.org/parser", + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ - "input_data", - "mode", - "pattern", - "sep" + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "context_id", + "files" ], "frozen": false, - "icon": "braces", + "icon": "MessagesSquare", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "3cda25c3f7b5", + "code_hash": "7a26c54d89ed", "dependencies": { "dependencies": [ { @@ -482,19 +568,19 @@ ], "total_dependencies": 1 }, - "module": "custom_components.parser" + "module": "custom_components.chat_input" }, - "minimized": false, + "minimized": true, "output_types": [], "outputs": [ { "allows_loop": false, "cache": true, - "display_name": "Parsed Text", + "display_name": "Chat Message", "group_outputs": false, "loop_types": null, - "method": "parse_combined_text", - "name": "parsed_text", + "method": "message_response", + "name": "message", "options": null, "required_inputs": null, "selected": "Message", @@ -524,73 +610,97 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/parser\"\n icon = \"braces\"\n\n inputs = 
[\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. 
Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n session_id = self.session_id or self.graph.session_id or \"\"\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=session_id,\n context_id=self.context_id,\n files=files,\n )\n if session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, - "input_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Data or DataFrame", + "context_id": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Context ID", "dynamic": false, - "info": "Accepts either a DataFrame or a Data object.", + "info": "The context ID of the chat. Adds an extra layer to the local memory.", "input_types": [ - "DataFrame", - "Data" + "Message" ], "list": false, "list_add_label": "Add More", - "name": "input_data", + "load_from_db": false, + "name": "context_id", "override_skip": false, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, - "type": "other", + "type": "str", "value": "" }, - "mode": { - "_input_type": "TabInput", - "advanced": false, - "display_name": "Mode", + "files": { + "_input_type": "FileInput", + "advanced": true, + "display_name": "Files", "dynamic": false, - "info": "Convert into raw string instead of using a template.", - "name": "mode", - "options": [ - "Parser", - "Stringify" + "fileTypes": [ + "csv", + "json", + "pdf", + "txt", + "md", + "mdx", + "yaml", + "yml", + "xml", + "html", + "htm", + "docx", + "py", + "sh", + "sql", + "js", + "ts", + "tsx", + "jpg", + "jpeg", + "png", + "bmp", + "image" ], + "file_path": "", + "info": "Files to be sent with the message.", + "list": true, + "list_add_label": "Add More", + "name": "files", "override_skip": false, "placeholder": "", - "real_time_refresh": true, "required": false, "show": true, + "temp_file": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "tab", - "value": "Parser" + "track_in_telemetry": false, + "type": "file", + "value": "" }, - "pattern": { + "input_value": { "_input_type": "MultilineInput", "advanced": false, "ai_enabled": false, "copy_field": false, - "display_name": "Template", - "dynamic": true, - "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", - "input_types": [ - "Message" - ], + "display_name": "Input Text", + "dynamic": false, + "info": "Message to be passed as input.", + "input_types": [], "list": 
false, "list_add_label": "Add More", "load_from_db": false, "multiline": true, - "name": "pattern", + "name": "input_value", "override_skip": false, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, "tool_mode": false, @@ -598,236 +708,10 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "{text}" + "value": "" }, - "sep": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Separator", - "dynamic": false, - "info": "String used to separate rows/items.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "sep", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "\n" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "ParserComponent" - }, - "dragging": false, - "id": "ParserComponent-tZs7s", - "measured": { - "height": 329, - "width": 320 - }, - "position": { - "x": 854.0613788430787, - "y": 1204.2200355777322 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "description": "Get chat inputs from the Playground.", - "display_name": "Chat Input", - "id": "ChatInput-7W1BE", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Get chat inputs from the Playground.", - "display_name": "Chat Input", - "documentation": "https://docs.langflow.org/chat-input-and-output", - "edited": false, - "field_order": [ - "input_value", - "should_store_message", - "sender", - "sender_name", - "session_id", - "context_id", - "files" - ], - "frozen": false, - "icon": "MessagesSquare", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": { - "code_hash": "7a26c54d89ed", - "dependencies": { - "dependencies": [ - { - "name": "lfx", - "version": null - } - ], - "total_dependencies": 1 - }, - "module": "custom_components.chat_input" - }, - "minimized": true, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Chat Message", - "group_outputs": false, - "loop_types": null, - "method": "message_response", - "name": "message", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = 
\"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n session_id = self.session_id or self.graph.session_id or \"\"\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=session_id,\n context_id=self.context_id,\n files=files,\n )\n if session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" - }, - "context_id": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Context ID", - "dynamic": false, - "info": "The context ID of the chat. 
Adds an extra layer to the local memory.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "context_id", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "files": { - "_input_type": "FileInput", - "advanced": true, - "display_name": "Files", - "dynamic": false, - "fileTypes": [ - "csv", - "json", - "pdf", - "txt", - "md", - "mdx", - "yaml", - "yml", - "xml", - "html", - "htm", - "docx", - "py", - "sh", - "sql", - "js", - "ts", - "tsx", - "jpg", - "jpeg", - "png", - "bmp", - "image" - ], - "file_path": "", - "info": "Files to be sent with the message.", - "list": true, - "list_add_label": "Add More", - "name": "files", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "temp_file": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "file", - "value": "" - }, - "input_value": { - "_input_type": "MultilineInput", - "advanced": false, - "ai_enabled": false, - "copy_field": false, - "display_name": "Input Text", - "dynamic": false, - "info": "Message to be passed as input.", - "input_types": [], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "input_value", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "sender": { - "_input_type": "DropdownInput", + "sender": { + "_input_type": "DropdownInput", "advanced": true, "combobox": false, "dialog_inputs": {}, @@ -932,12 +816,12 @@ "dragging": false, "id": "ChatInput-7W1BE", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 980.6037146218582, - "y": 1642.0144323522718 + "x": 871.1682517864286, + "y": 1681.8091461106098 }, "selected": false, "type": "genericNode" @@ -972,7 +856,7 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "cae45e2d53f6", + "code_hash": "8c87e536cca4", "dependencies": { "dependencies": [ { @@ -981,7 +865,7 @@ }, { "name": "fastapi", - "version": "0.120.0" + "version": "0.133.1" }, { "name": "lfx", @@ -1052,7 +936,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n 
info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert 
bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. 
Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n # Preserve existing session_id from the incoming message if it exists\n existing_session_id = message.session_id\n else:\n message = Message(text=text)\n existing_session_id = None\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n # Preserve session_id from incoming message, or use component/graph session_id\n message.session_id = (\n self.session_id or existing_session_id or (self.graph.session_id if hasattr(self, \"graph\") else None) or \"\"\n )\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or 
str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1234,7 +1118,7 @@ "dragging": false, "id": "ChatOutput-axewE", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { @@ -1246,6 +1130,8 @@ }, { "data": { + "description": "Runs a language model given a specified provider.", + "display_name": "Language Model", "id": "LanguageModelComponent-KdXf9", "node": { "base_classes": [ @@ -1260,8 +1146,7 @@ "documentation": "https://docs.langflow.org/components-models", "edited": false, "field_order": [ - "provider", - "model_name", + "model", "api_key", "base_url_ibm_watsonx", "project_id", @@ -1269,47 +1154,23 @@ "input_value", "system_message", "stream", - "temperature" + "temperature", + "max_tokens" ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-12-02T21:32:07.567Z", + "last_updated": "2026-02-27T18:44:56.137Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "694ffc4b17b8", + "code_hash": "4af3c0cc0dcf", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "langchain_anthropic", - "version": "0.3.14" - }, - { - "name": "langchain_ibm", - "version": "0.3.19" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, - { - "name": "pydantic", - "version": "2.10.6" - }, { "name": "lfx", "version": null } ], - "total_dependencies": 7 + "total_dependencies": 1 }, "keywords": [ "model", @@ -1317,7 +1178,7 @@ "language model", "large language model" ], - "module": "lfx.components.models.language_model.LanguageModelComponent" + "module": "custom_components.language_model" }, "minimized": false, "output_types": [], @@ -1327,6 +1188,7 @@ "cache": true, "display_name": "Model Response", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "text_response", "name": "text_output", @@ -1344,6 +1206,7 @@ "cache": true, "display_name": "Language Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_model", "name": "model_output", @@ -1358,30 +1221,31 @@ } ], "pinned": false, - "priority": 0, "template": { "_frontend_node_flow_id": { "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", + "advanced": true, + "display_name": "API Key", "dynamic": false, - "info": "Model Provider API key", + "info": "Falls back to OPENAI_API_KEY environment variable", "input_types": [], "load_from_db": true, "name": "api_key", + "override_skip": 
false, "password": true, "placeholder": "", "real_time_refresh": true, "required": false, "show": true, "title_case": false, + "track_in_telemetry": false, "type": "str", "value": "OPENAI_API_KEY" }, @@ -1404,6 +1268,7 @@ "https://ca-tor.ml.cloud.ibm.com" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -1412,6 +1277,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "https://us-south.ml.cloud.ibm.com" }, @@ -1431,7 +1297,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_ibm import ChatWatsonx\nfrom langchain_ollama import ChatOllama\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.google_generative_ai_model import ChatGoogleGenerativeAIFixed\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, MessageTextInput, StrInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# IBM watsonx.ai constants\nIBM_WATSONX_DEFAULT_MODELS = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\nIBM_WATSONX_URLS = [\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n]\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"completion\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching IBM watsonx models. 
Using default models.\")\n return IBM_WATSONX_DEFAULT_MODELS\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\", \"IBM watsonx.ai\", \"Ollama\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[\n {\"icon\": \"OpenAI\"},\n {\"icon\": \"Anthropic\"},\n {\"icon\": \"GoogleGenerativeAI\"},\n {\"icon\": \"WatsonxAI\"},\n {\"icon\": \"Ollama\"},\n ],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n info=\"The project ID associated with the foundation model (IBM watsonx.ai only)\",\n show=False,\n required=False,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAIFixed(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n if provider == \"IBM watsonx.ai\":\n if not self.api_key:\n msg = \"IBM API key is required when using IBM watsonx.ai provider\"\n raise 
ValueError(msg)\n if not self.base_url_ibm_watsonx:\n msg = \"IBM watsonx API Endpoint is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n if not self.project_id:\n msg = \"IBM watsonx Project ID is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.base_url_ibm_watsonx,\n project_id=self.project_id,\n model_id=model_name,\n params={\n \"temperature\": temperature,\n },\n streaming=stream,\n )\n if provider == \"Ollama\":\n if not self.ollama_base_url:\n msg = \"Ollama API URL is required when using Ollama provider\"\n raise ValueError(msg)\n if not model_name:\n msg = \"Model name is required when using Ollama provider\"\n raise ValueError(msg)\n\n transformed_base_url = transform_localhost_url(self.ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return ChatOllama(\n base_url=transformed_base_url,\n model=model_name,\n temperature=temperature,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model_name\"][\"options\"] = IBM_WATSONX_DEFAULT_MODELS\n build_config[\"model_name\"][\"value\"] = IBM_WATSONX_DEFAULT_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM API Key\"\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = 
True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n elif field_value == \"Ollama\":\n # Fetch Ollama models from the API\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n # Try multiple sources to get the URL (in order of preference):\n # 1. Instance attribute (already resolved from global/db)\n # 2. Build config value (may be a global variable reference)\n # 3. Default value\n ollama_url = getattr(self, \"ollama_base_url\", None)\n if not ollama_url:\n config_value = build_config[\"ollama_base_url\"].get(\"value\", DEFAULT_OLLAMA_URL)\n # If config_value looks like a variable name (all caps with underscores), use default\n is_variable_ref = (\n config_value\n and isinstance(config_value, str)\n and config_value.isupper()\n and \"_\" in config_value\n )\n if is_variable_ref:\n await logger.adebug(\n f\"Config value appears to be a variable reference: {config_value}, using default\"\n )\n ollama_url = DEFAULT_OLLAMA_URL\n else:\n ollama_url = config_value\n\n await logger.adebug(f\"Fetching Ollama models for provider switch. URL: {ollama_url}\")\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model_name\"][\"options\"] = models\n build_config[\"model_name\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama models. Setting empty options.\")\n build_config[\"model_name\"][\"options\"] = []\n build_config[\"model_name\"][\"value\"] = \"\"\n else:\n await logger.awarning(f\"Invalid Ollama URL: {ollama_url}\")\n build_config[\"model_name\"][\"options\"] = []\n build_config[\"model_name\"][\"value\"] = \"\"\n elif (\n field_name == \"base_url_ibm_watsonx\"\n and field_value\n and hasattr(self, \"provider\")\n and self.provider == \"IBM watsonx.ai\"\n ):\n # Fetch IBM models when base_url changes\n try:\n models = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model_name\"][\"options\"] = models\n build_config[\"model_name\"][\"value\"] = models[0] if models else IBM_WATSONX_DEFAULT_MODELS[0]\n info_message = f\"Updated model options: {len(models)} models found in {field_value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating IBM model options.\")\n elif field_name == \"ollama_base_url\":\n # Fetch Ollama models when ollama_base_url changes\n # Use the field_value directly since this is triggered when the field changes\n logger.debug(\n f\"Fetching Ollama models from updated URL: {build_config['ollama_base_url']} \\\n and value {self.ollama_base_url}\",\n )\n await logger.adebug(f\"Fetching Ollama models from updated URL: {self.ollama_base_url}\")\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model_name\"][\"options\"] = models\n build_config[\"model_name\"][\"value\"] = models[0] if models else \"\"\n info_message = f\"Updated model options: 
{len(models)} models found in {self.ollama_base_url}\"\n await logger.ainfo(info_message)\n except ValueError:\n await logger.awarning(\"Error updating Ollama model options.\")\n build_config[\"model_name\"][\"options\"] = []\n build_config[\"model_name\"][\"value\"] = \"\"\n else:\n await logger.awarning(f\"Invalid Ollama URL: {self.ollama_base_url}\")\n build_config[\"model_name\"][\"options\"] = []\n build_config[\"model_name\"][\"value\"] = \"\"\n elif field_name == \"model_name\":\n # Refresh Ollama models when model_name field is accessed\n if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n ollama_url = getattr(self, \"ollama_base_url\", DEFAULT_OLLAMA_URL)\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model_name\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama models.\")\n build_config[\"model_name\"][\"options\"] = []\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n # Hide system_message for o1 models - currently unsupported\n if field_value and field_value.startswith(\"o1\") and hasattr(self, \"provider\") and self.provider == \"OpenAI\":\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n" + "value": "from lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.unified_models import (\n apply_provider_variable_config_to_build_config,\n get_language_model_options,\n get_llm,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, StrInput\nfrom lfx.io import IntInput, MessageInput, ModelInput, MultilineInput, SecretStrInput, SliderInput\n\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Language Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n info=\"The project ID associated with the foundation model (IBM watsonx.ai only)\",\n show=False,\n required=False,\n ),\n StrInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n info=\"Maximum number of tokens to generate. Field name varies by provider.\",\n advanced=True,\n range_spec=RangeSpec(min=1, max=128000, step=1, step_type=\"int\"),\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n return get_llm(\n model=self.model,\n user_id=self.user_id,\n api_key=self.api_key,\n temperature=self.temperature,\n stream=self.stream,\n max_tokens=getattr(self, \"max_tokens\", None),\n watsonx_url=getattr(self, \"base_url_ibm_watsonx\", None),\n watsonx_project_id=getattr(self, \"project_id\", None),\n ollama_base_url=getattr(self, \"ollama_base_url\", None),\n )\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"language_model_options\",\n get_options_func=get_language_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Hide all provider-specific fields by default\n for field in [\"api_key\", \"base_url_ibm_watsonx\", \"project_id\", \"ollama_base_url\"]:\n if field in build_config:\n build_config[field][\"show\"] = False\n build_config[field][\"required\"] = False\n\n # Show/configure provider-specific fields based on selected model\n # Get current model value - from field_value if model is being changed, otherwise from build_config\n current_model_value = field_value if field_name == \"model\" else build_config.get(\"model\", {}).get(\"value\")\n if isinstance(current_model_value, list) and len(current_model_value) > 0:\n selected_model = current_model_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n if provider:\n # Apply provider variable configuration (required_for_component, advanced, env var fallback)\n build_config = apply_provider_variable_config_to_build_config(build_config, provider)\n\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1446,6 +1312,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "input_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1453,76 +1320,287 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, - "is_refresh": false, - "model_name": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "is_refresh": true, + "max_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Tokens", "dynamic": false, - "external_options": {}, - "info": "Select the model 
to use", - "name": "model_name", - "options": [ - "gpt-4o-mini", - "gpt-4o", - "gpt-4.1", - "gpt-4.1-mini", - "gpt-4.1-nano", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-4", - "gpt-3.5-turbo", - "gpt-5.1", - "gpt-5", - "gpt-5-mini", - "gpt-5-nano", - "gpt-5-chat-latest", - "o1", - "o3-mini", - "o3", - "o3-pro", - "o4-mini", - "o4-mini-high" - ], - "options_metadata": [], + "info": "Maximum number of tokens to generate. Field name varies by provider.", + "list": false, + "list_add_label": "Add More", + "name": "max_tokens", + "override_skip": false, "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, + "range_spec": { + "max": 128000, + "min": 1, + "step": 1, + "step_type": "int" + }, "required": false, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, "trace_as_metadata": true, - "type": "str", - "value": "gpt-4o-mini" + "track_in_telemetry": true, + "type": "int", + "value": 0 + }, + "model": { + "_input_type": "ModelInput", + "advanced": false, + "display_name": "Language Model", + "dynamic": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "LanguageModel" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "language", + "name": "model", + "options": [ + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-opus-4-5-20251101", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-haiku-4-5-20251001", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-sonnet-4-5-20250929", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-opus-4-1-20250805", + "provider": "Anthropic" + }, + { + "category": "Anthropic", + "icon": "Anthropic", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatAnthropic", + "model_name_param": "model" + }, + "name": "claude-opus-4-20250514", + "provider": "Anthropic" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5.1" + ] + }, + "name": "gpt-5.1", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5" + ] + }, + "name": "gpt-5", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5-mini" + ] + 
}, + "name": "gpt-5-mini", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5-nano" + ] + }, + "name": "gpt-5-nano", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model", + "reasoning_models": [ + "gpt-5-chat-latest" + ] + }, + "name": "gpt-5-chat-latest", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model" + }, + "name": "gpt-4o", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model" + }, + "name": "gpt-4", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } + ], + "override_skip": false, + "placeholder": "Setup Provider", + "real_time_refresh": true, + "refresh_button": true, + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "api_key_param": "api_key", + "context_length": 128000, + "model_class": "ChatOpenAI", + "model_name_param": "model" + }, + "name": "gpt-4", + "provider": "OpenAI" + } + ] }, "ollama_base_url": { - "_input_type": "MessageTextInput", + "_input_type": "StrInput", "advanced": false, "display_name": "Ollama API URL", "dynamic": false, "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], "list": false, "list_add_label": "Add More", "load_from_db": false, "name": "ollama_base_url", + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, "show": false, "title_case": false, "tool_mode": false, - "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1536,63 +1614,17 @@ "list_add_label": "Add More", "load_from_db": false, "name": "project_id", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the model provider", - "name": "provider", - "options": [ - "OpenAI", - "Anthropic", - "Google", - "IBM watsonx.ai", - "Ollama" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Anthropic" - }, - { - "icon": "GoogleGenerativeAI" - }, - { - "icon": "WatsonxAI" - }, - { - "icon": "Ollama" - } - ], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "OpenAI" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "OpenAI" - }, "stream": { "_input_type": "BoolInput", "advanced": true, @@ -1602,18 +1634,21 @@ "list": false, "list_add_label": "Add More", "name": "stream", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": false }, "system_message": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "System Message", "dynamic": false, @@ -1626,6 +1661,8 @@ "load_from_db": false, "multiline": true, "name": "system_message", + "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -1633,6 +1670,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1647,6 +1685,7 @@ "min_label": "", "min_label_icon": "", "name": "temperature", + "override_skip": false, "placeholder": "", "range_spec": { "max": 1, @@ -1661,6 +1700,7 @@ "slider_input": false, "title_case": false, "tool_mode": false, + "track_in_telemetry": false, "type": "slider", "value": 0.1 } @@ -1674,7 +1714,7 @@ "dragging": false, "id": "LanguageModelComponent-KdXf9", "measured": { - "height": 534, + "height": 371, "width": 320 }, "position": { @@ -1684,120 +1724,6 @@ "selected": false, "type": "genericNode" }, - { - "data": { - "id": "TextInput-4cEHx", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Get user text inputs.", - "display_name": "Text Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": { - "code_hash": "7b91454fe0f3", - "dependencies": { - "dependencies": [ - { - "name": "lfx", - "version": null - } - ], - "total_dependencies": 1 - }, 
- "module": "custom_components.text_input" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from lfx.base.io.text import TextComponent\nfrom lfx.io import StrInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n StrInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" - }, - "input_value": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Text", - "dynamic": false, - "info": "Text to be passed as input.", - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "input_value", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "OPENRAG-QUERY-FILTER" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "TextInput" - }, - "dragging": false, - "id": "TextInput-4cEHx", - "measured": { - "height": 204, - "width": 320 - }, - "position": { - "x": -262.4138400422388, - "y": 1630.3486582843238 - }, - "selected": false, - "type": "genericNode" - }, { "data": { "description": "Generate embeddings using a specified provider.", @@ -1813,14 +1739,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -1829,48 +1753,27 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-02T21:24:52.479Z", + "last_updated": "2026-02-27T18:44:03.477Z", "legacy": false, - "lf_version": "1.7.0.dev21", + "lf_version": "1.8.0", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - 
"name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -1902,13 +1805,13 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "OpenAI API Base URL", + "advanced": true, + "display_name": "API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", "input_types": [ @@ -1932,12 +1835,12 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, @@ -1948,7 +1851,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -2018,7 +1921,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -2028,6 +1931,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -2040,21 +1944,19 @@ "type": "int", "value": "" }, - "fail_safe_mode": { + "input_text": { "_input_type": "BoolInput", "advanced": true, - "display_name": "Fail-Safe Mode", + "display_name": "Include the original text in the output", "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", + "info": "", "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", + "name": "input_text", "override_skip": false, "placeholder": "", - "real_time_refresh": true, "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -2062,27 +1964,7 @@ "type": "bool", "value": true }, - "input_text": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Include the original text in the output", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "input_text", - "override_skip": false, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -2104,32 +1986,148 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "text-embedding-3-small" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + 
"is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "text-embedding-3-small" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -2151,32 +2149,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -2202,48 +2174,6 @@ "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "OpenAI" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "OpenAI" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -2313,32 +2243,33 @@ "dragging": false, "id": "EmbeddingModel-ooLFP", "measured": { - "height": 451, + "height": 207, "width": 320 }, "position": { - "x": -1091.341314577015, - "y": 1237.2170349466728 + "x": -324.91242891544755, + "y": 1396.6496101594803 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. 
Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", "node": { "base_classes": [ "Data", - "DataFrame", "VectorStore" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": true, + "edited": false, "field_order": [ "docs_metadata", "opensearch_url", @@ -2363,15 +2294,16 @@ "jwt_header", "bearer_prefix", "use_ssl", - "verify_certs" + "verify_certs", + "request_timeout", + "max_retries" ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-12-02T21:30:53.860Z", + "last_updated": "2026-02-27T19:55:57.532Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "db60433453a8", + "code_hash": "6a3df45b55c5", "dependencies": { "dependencies": [ { @@ -2380,10 +2312,14 @@ }, { "name": "lfx", - "version": "0.2.0.dev21" + "version": null + }, + { + "name": "tenacity", + "version": "8.5.0" } ], - "total_dependencies": 2 + "total_dependencies": 3 }, "module": "custom_components.opensearch_multimodel_multiembedding" }, @@ -2411,18 +2347,17 @@ { "allows_loop": false, "cache": true, - "display_name": "DataFrame", + "display_name": "Raw Search", "group_outputs": false, "hidden": null, "loop_types": null, - "method": "as_dataframe", - "name": "dataframe", + "method": "raw_search", + "name": "raw_search", "options": null, "required_inputs": null, - "selected": "DataFrame", "tool_mode": true, "types": [ - "DataFrame" + "Data" ], "value": "__UNDEFINED__" }, @@ -2437,7 +2372,6 @@ "name": "vectorstoreconnection", "options": null, "required_inputs": null, - "selected": "VectorStore", "tool_mode": true, "types": [ "VectorStore" @@ -2451,7 +2385,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "auth_mode": { @@ -2519,7 +2453,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\n\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: 
Original embedding model name (e.g., \"text-embedding-3-small\")\n\n    Returns:\n        Normalized field suffix (e.g., \"text_embedding_3_small\")\n    \"\"\"\n    normalized = model_name.lower()\n    # Replace common separators with underscores\n    normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n    # Remove any non-alphanumeric characters except underscores\n    normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n    # Remove duplicate underscores\n    while \"__\" in normalized:\n        normalized = normalized.replace(\"__\", \"_\")\n    return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n    \"\"\"Get the dynamic embedding field name for a model.\n\n    Args:\n        model_name: Embedding model name\n\n    Returns:\n        Field name in format: chunk_embedding_{normalized_model_name}\n    \"\"\"\n    logger.debug(f\"Resolved embedding field name: chunk_embedding_{normalize_model_name(model_name)}\")\n    return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n    \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n    This component provides vector storage and retrieval using OpenSearch, combining semantic\n    similarity search (KNN) with keyword-based search for optimal results. It supports:\n    - Multiple embedding models per index with dynamic field names\n    - Automatic detection and querying of all available embedding models\n    - Parallel embedding generation for multi-model search\n    - Document ingestion with model tracking\n    - Advanced filtering and aggregations\n    - Flexible authentication options\n\n    Features:\n    - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n    - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n    - Auto-detection of available models in the index\n    - Parallel query embedding generation for all detected models\n    - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n    - Flexible authentication (Basic auth, JWT tokens)\n\n    Model Name Resolution:\n    - Priority: deployment > model > model_id > model_name attributes\n    - This ensures correct matching between embedding objects and index fields\n    - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n    - During search, each detected model in the index is matched to its corresponding embedding object\n    \"\"\"\n\n    display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n    icon: str = \"OpenSearch\"\n    description: str = (\n        \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n        \"To search use the tools search_documents and raw_search. 
\"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. 
\"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n                \"Disable for self-signed certificates in development environments.\"\n            ),\n        ),\n        # DictInput(name=\"query\", display_name=\"Query\", input_types=[\"Data\"], is_list=False, tool_mode=True),\n    ]\n    outputs = [\n        Output(\n            display_name=\"Search Results\",\n            name=\"search_results\",\n            method=\"search_documents\",\n        ),\n        Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n    ]\n\n    def raw_search(self, query: str | None = None) -> Data:\n        \"\"\"Execute a raw OpenSearch query against the target index.\n\n        Args:\n            query (str | dict | None): The OpenSearch query DSL, as a dict or a JSON string. Falls back to self.search_query when None.\n\n        Returns:\n            Data: Search results as a Data object.\n\n        Raises:\n            ValueError: If a string query cannot be parsed as JSON.\n        \"\"\"\n        raw_query = query if query is not None else self.search_query\n        if isinstance(raw_query, str):\n            raw_query = json.loads(raw_query)\n        client = self.build_client()\n        logger.info(f\"query: {raw_query}\")\n        resp = client.search(\n            index=self.index_name,\n            body=raw_query,\n            params={\"terminate_after\": 0},\n        )\n        # Remove any _source keys whose value is a list of floats (embedding vectors)\n        # Minimum length threshold to identify embedding vectors\n        min_vector_length = 100\n\n        def is_vector(val):\n            # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n            return (\n                isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n            )\n\n        if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n            for hit in resp[\"hits\"][\"hits\"]:\n                source = hit.get(\"_source\")\n                if isinstance(source, dict):\n                    keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n                    for k in keys_to_remove:\n                        source.pop(k)\n        logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n        return Data(**resp)\n\n    def _get_embedding_model_name(self, embedding_obj=None) -> str:\n        \"\"\"Get the embedding model name from component config or embedding object.\n\n        Priority: deployment > model > model_id > model_name\n        This ensures we use the actual model being deployed, not just the configured model.\n        Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n        Args:\n            embedding_obj: Specific embedding object to get name from (optional)\n\n        Returns:\n            Embedding model name\n\n        Raises:\n            ValueError: If embedding model name cannot be determined\n        \"\"\"\n        # First try explicit embedding_model_name input\n        if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n            return self.embedding_model_name.strip()\n\n        # Try to get from provided embedding object\n        if embedding_obj:\n            # Priority: deployment > model > model_id > model_name\n            if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n                return str(embedding_obj.deployment)\n            if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n                return str(embedding_obj.model)\n            if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n                return str(embedding_obj.model_id)\n            if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n                return str(embedding_obj.model_name)\n\n        # Try to get from embedding component (legacy single embedding)\n        if hasattr(self, \"embedding\") and self.embedding:\n            # Handle list of embeddings\n            if isinstance(self.embedding, list) and len(self.embedding) > 0:\n                first_emb = self.embedding[0]\n                if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n                    return str(first_emb.deployment)\n                if hasattr(first_emb, \"model\") and first_emb.model:\n                    return 
str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Note: Some OpenSearch versions/configurations have issues with dynamically adding\n knn_vector mappings (NullPointerException). 
This method checks if the field\n        already exists before attempting to add it, and gracefully skips if the field\n        is already properly configured.\n\n        Args:\n            client: OpenSearch client instance\n            index_name: Target index name\n            field_name: Dynamic field name for this embedding model\n            dim: Vector dimensionality\n            engine: Vector search engine\n            space_type: Distance metric\n            ef_construction: Construction parameter\n            m: HNSW parameter\n        \"\"\"\n        # First, check if the field already exists and is properly mapped\n        properties = self._get_index_properties(client)\n        if self._is_knn_vector_field(properties, field_name):\n            # Field already exists as knn_vector - verify dimensions match\n            existing_dim = self._get_field_dimension(properties, field_name)\n            if existing_dim is not None and existing_dim != dim:\n                logger.warning(\n                    f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n                    f\"but current embedding has dimension {dim}. Using existing mapping.\"\n                )\n            else:\n                logger.info(\n                    f\"[OpenSearchMultimodal] Field '{field_name}' already exists \"\n                    f\"as knn_vector with matching dimensions - skipping mapping update\"\n                )\n            return\n\n        # Field doesn't exist, try to add the mapping\n        try:\n            mapping = {\n                \"properties\": {\n                    field_name: {\n                        \"type\": \"knn_vector\",\n                        \"dimension\": dim,\n                        \"method\": {\n                            \"name\": \"disk_ann\",\n                            \"space_type\": space_type,\n                            \"engine\": engine,\n                            \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n                        },\n                    },\n                    # Also ensure the embedding_model tracking field exists as keyword\n                    \"embedding_model\": {\"type\": \"keyword\"},\n                    \"embedding_dimensions\": {\"type\": \"integer\"},\n                }\n            }\n            client.indices.put_mapping(index=index_name, body=mapping)\n            logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n        except Exception as e:\n            # Check if this is the known OpenSearch k-NN NullPointerException issue\n            error_str = str(e).lower()\n            if \"null\" in error_str or \"nullpointerexception\" in error_str:\n                logger.warning(\n                    f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name} \"\n                    f\"due to OpenSearch k-NN plugin issue: {e}. \"\n                    f\"This is a known issue with some OpenSearch versions. \"\n                    f\"Skipping mapping update. \"\n                    f\"Please ensure the index has the correct mapping for KNN search to work.\"\n                )\n                # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n                return\n            logger.warning(f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name}: {e}\")\n            raise\n\n        # Verify the field was added correctly\n        properties = self._get_index_properties(client)\n        if not self._is_knn_vector_field(properties, field_name):\n            msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. 
This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodal] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, 
max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodal] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodal] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodal] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise 
ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodal][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodal][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = 
getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" 
values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for 
non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n 
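The non-IBM branch above is an order-preserving fan-out: results are slotted back by their original index, so `vectors` always lines up with `texts`. A standalone sketch, with `embed_one` as a dummy stand-in for `embed_chunk_with_retry`:

```python
# Order-preserving parallel embedding, mirroring the non-IBM branch above.
from concurrent.futures import ThreadPoolExecutor, as_completed


def embed_one(chunk: str, idx: int) -> list[float]:
    return [float(idx), float(len(chunk))]  # fake 2-dim "vector"


texts = ["alpha", "beta", "gamma"]
vectors: list[list[float] | None] = [None] * len(texts)
max_workers = min(max(len(texts), 1), 8)

with ThreadPoolExecutor(max_workers=max_workers) as executor:
    futures = {executor.submit(embed_one, c, i): i for i, c in enumerate(texts)}
    for future in as_completed(futures):
        i = futures[future]
        vectors[i] = future.result()  # slot results back by original index

print(vectors)  # [[0.0, 5.0], [1.0, 4.0], [2.0, 5.0]]
```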
Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
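For illustration, the Format B conversion rules can be re-implemented standalone; the filter values here are fabricated:

```python
# Context-style filters (Format B) mapped onto OpenSearch term/terms clauses,
# following the rules described above: empty list -> match-nothing placeholder,
# single value -> term, multiple values -> terms.
field_mapping = {"data_sources": "filename", "document_types": "mimetype", "owners": "owner"}


def coerce(filter_obj: dict) -> list[dict]:
    clauses = []
    for k, values in filter_obj.items():
        if not isinstance(values, list):
            continue
        field = field_mapping.get(k, k)
        if len(values) == 0:
            clauses.append({"term": {field: "__IMPOSSIBLE_VALUE__"}})  # match nothing
        elif len(values) == 1:
            if values[0] != "__IMPOSSIBLE_VALUE__":
                clauses.append({"term": {field: values[0]}})
        else:
            clauses.append({"terms": {field: values}})
    return clauses


print(coerce({"data_sources": ["file1.pdf"], "owners": ["u1", "u2"], "document_types": []}))
# [{'term': {'filename': 'file1.pdf'}}, {'terms': {'owner': ['u1', 'u2']}},
#  {'term': {'mimetype': '__IMPOSSIBLE_VALUE__'}}]
```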
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename.keyword\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n 
}\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
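The final hit-to-result mapping is small enough to show in isolation; `resp` here is a fabricated search response:

```python
# 'text' becomes page_content; the rest of _source becomes metadata.
resp = {"hits": {"hits": [
    {"_score": 1.7, "_source": {"text": "OpenRAG components...", "filename": "doc.pdf",
                                "embedding_model": "text-embedding-3-small"}},
]}}

results = [
    {
        "page_content": hit["_source"].get("text", ""),
        "metadata": {k: v for k, v in hit["_source"].items() if k != "text"},
        "score": hit.get("_score"),
    }
    for hit in resp.get("hits", {}).get("hits", [])
]
print(results[0]["score"], results[0]["metadata"])
# 1.7 {'filename': 'doc.pdf', 'embedding_model': 'text-embedding-3-small'}
```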
build_config" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\nREQUEST_TIMEOUT = 60\nMAX_RETRIES = 5\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. \"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n \"request_timeout\",\n \"max_retries\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"nmslib\", \"faiss\", \"lucene\", \"jvector\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'nmslib' works with standard \"\n \"OpenSearch. 'jvector' requires OpenSearch 2.9+. 
'lucene' requires index.knn: true. \"\n \"Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # ----- Timeout / Retry -----\n StrInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout (seconds)\",\n value=\"60\",\n advanced=True,\n info=(\n \"Time in seconds to wait for a response from OpenSearch. 
\"\n \"Increase for large bulk ingestion or complex hybrid queries.\"\n ),\n ),\n StrInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n value=\"3\",\n advanced=True,\n info=\"Number of retries for failed connections before raising an error.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | dict | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n\n if raw_query is None or (isinstance(raw_query, str) and not raw_query.strip()):\n self.log(\"No query provided for raw search - returning empty results\")\n return Data(data={})\n\n if isinstance(raw_query, dict):\n query_body = raw_query\n elif isinstance(raw_query, str):\n s = raw_query.strip()\n\n # First, optimistically try to parse as JSON DSL\n try:\n query_body = json.loads(s)\n except json.JSONDecodeError:\n # Fallback: treat as a basic text query over common fields\n query_body = {\n \"query\": {\n \"multi_match\": {\n \"query\": s,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n }\n }\n }\n else:\n msg = f\"Unsupported raw_search query type: {type(raw_query)!r}\"\n raise TypeError(msg)\n\n client = self.build_client()\n logger.info(f\"query: {query_body}\")\n resp = client.search(\n index=self.index_name,\n body=query_body,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return 
str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n 
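For a 1536-dimension model, the mapping emitted by `_default_text_mapping` comes out roughly like this. The field name and dimension are illustrative assumptions; note that `ef_construction` is shown at the component input default (512) rather than the helper's own parameter default (100), since callers pass it explicitly:

```python
# Index mapping shape for one dynamic embedding field plus the two
# tracking fields, as produced by the helper above.
import json

mapping = {
    "settings": {"index": {"knn": True, "knn.algo_param.ef_search": 512}},
    "mappings": {"properties": {
        "chunk_embedding_text_embedding_3_small": {
            "type": "knn_vector",
            "dimension": 1536,
            "method": {
                "name": "disk_ann",
                "space_type": "l2",
                "engine": "jvector",
                "parameters": {"ef_construction": 512, "m": 16},
            },
        },
        "embedding_model": {"type": "keyword"},        # provenance of each document
        "embedding_dimensions": {"type": "integer"},
    }},
}
print(json.dumps(mapping, indent=2))
```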
m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Note: Some OpenSearch versions/configurations have issues with dynamically adding\n knn_vector mappings (NullPointerException). This method checks if the field\n already exists before attempting to add it, and gracefully skips if the field\n is already properly configured.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n # First, check if the field already exists and is properly mapped\n properties = self._get_index_properties(client)\n if self._is_knn_vector_field(properties, field_name):\n # Field already exists as knn_vector - verify dimensions match\n existing_dim = self._get_field_dimension(properties, field_name)\n if existing_dim is not None and existing_dim != dim:\n logger.warning(\n f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n f\"but current embedding has dimension {dim}. Using existing mapping.\"\n )\n else:\n logger.info(\n f\"[OpenSearchMultimodel] Field '{field_name}' already exists \"\n f\"as knn_vector with matching dimensions - skipping mapping update\"\n )\n return\n\n # Field doesn't exist, try to add the mapping\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except RequestError as e:\n error_str = str(e).lower()\n if \"invalid engine\" in error_str and \"jvector\" in error_str:\n msg = (\n \"The 'jvector' engine is not available in your OpenSearch installation. \"\n \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to OpenSearch 2.9+.\"\n )\n raise ValueError(msg) from e\n if \"index.knn\" in error_str:\n msg = (\n \"The index has index.knn: false. Delete the existing index and let the \"\n \"component recreate it, or create a new index with a different name.\"\n )\n raise ValueError(msg) from e\n raise\n except Exception as e:\n # Check if this is the known OpenSearch k-NN NullPointerException issue\n error_str = str(e).lower()\n if \"null\" in error_str or \"nullpointerexception\" in error_str:\n logger.warning(\n f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name} \"\n f\"due to OpenSearch k-NN plugin issue: {e}. \"\n f\"This is a known issue with some OpenSearch versions. \"\n f\"Skipping mapping update. 
\"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodel] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- param helpers ----------\n def _parse_int_param(self, attr_name: str, default: int) -> int:\n \"\"\"Parse a string attribute to int, returning *default* on failure.\"\"\"\n raw = getattr(self, attr_name, None)\n if raw is None or str(raw).strip() == \"\":\n return default\n try:\n value = int(str(raw).strip())\n except ValueError:\n logger.warning(f\"Invalid integer value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n if value < 0:\n logger.warning(f\"Negative value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n return value\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return 
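One bulk action assembled by `_bulk_ingest_embeddings` looks like this. The index name, metadata values, and truncated vector are fabricated, and the `helpers.bulk` call is left commented out since no client is configured here:

```python
# Shape of a single bulk "index" action with model tracking. On Amazon
# OpenSearch Serverless the document id key is "id" instead of "_id".
import uuid

embedding = [0.1] * 4  # truncated dummy vector
action = {
    "_op_type": "index",
    "_index": "langflow",
    "_id": str(uuid.uuid4()),
    "chunk_embedding_text_embedding_3_small": embedding,
    "text": "OpenRAG supports multiple embedding models.",
    "embedding_model": "text-embedding-3-small",  # model tracking field
    "embedding_dimensions": len(embedding),
    "filename": "doc.pdf",
}
# helpers.bulk(client, [action], max_chunk_bytes=1 * 1024 * 1024)
print(sorted(action))
```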
{\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n timeout=self._parse_int_param(\"request_timeout\", REQUEST_TIMEOUT),\n max_retries=self._parse_int_param(\"max_retries\", MAX_RETRIES),\n retry_on_timeout=True,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodel] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodel] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodel] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodel][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodel][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, 
dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic 
using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = 
len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping (index.knn: true is required for vector search)\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error == \"resource_already_exists_exception\":\n pass # Index was created concurrently\n else:\n error_msg = str(creation_error).lower()\n if \"invalid engine\" in error_msg or \"illegal_argument\" in error_msg:\n if \"jvector\" in error_msg:\n msg = (\n \"The 'jvector' engine is not available in your OpenSearch installation. \"\n \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to 2.9+.\"\n )\n raise ValueError(msg) from creation_error\n if \"index.knn\" in error_msg:\n msg = (\n \"The index has index.knn: false. Delete the existing index and let the \"\n \"component recreate it, or create a new index with a different name.\"\n )\n raise ValueError(msg) from creation_error\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n raise\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n 
{\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. 
\"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n def _get_filename_agg_field(self, index_properties: dict[str, Any] | None) -> str:\n \"\"\"Choose the appropriate field for filename aggregations.\"\"\"\n if not index_properties:\n return \"filename.keyword\"\n\n filename_def = index_properties.get(\"filename\")\n if not isinstance(filename_def, dict):\n return \"filename.keyword\"\n\n field_type = filename_def.get(\"type\")\n fields_def = filename_def.get(\"fields\", {})\n\n # Top-level keyword with no subfields\n if field_type == \"keyword\" and not isinstance(fields_def, dict):\n return \"filename\"\n\n # Text field with keyword subfield\n if isinstance(fields_def, dict) and \"keyword\" in fields_def:\n return \"filename.keyword\"\n\n # Fallback: aggregate on filename directly\n return \"filename\"\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a 
sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Determine the best aggregation field for filename based on index mapping\n filename_agg_field = self._get_filename_agg_field(index_properties)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": filename_agg_field, \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n 
fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -2613,7 +2547,7 @@ "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. Error message will show all available identifiers.", "list": false, "list_add_label": "Add More", - "load_from_db": true, + "load_from_db": false, "name": "embedding_model_name", "override_skip": false, "placeholder": "", @@ -2624,7 +2558,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "SELECTED_EMBEDDING_MODEL" + "value": "" }, "engine": { "_input_type": "DropdownInput", @@ -2634,13 +2568,13 @@ "display_name": "Vector Engine", "dynamic": false, "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "info": "Vector search engine for similarity calculations. 'nmslib' works with standard OpenSearch. 'jvector' requires OpenSearch 2.9+. 'lucene' requires index.knn: true. Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", "name": "engine", "options": [ - "jvector", "nmslib", "faiss", - "lucene" + "lucene", + "jvector" ], "options_metadata": [], "override_skip": false, @@ -2672,6 +2606,7 @@ "multiline": true, "name": "filter_expression", "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -2788,6 +2723,27 @@ "type": "int", "value": 16 }, + "max_retries": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "Number of retries for failed connections before raising an error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "3" + }, "num_candidates": { "_input_type": "IntInput", "advanced": true, @@ -2866,7 +2822,28 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "epC8FOOeq3$3t*VB" + "value": "8$8@SqVR&ZvbGBrd" + }, + "request_timeout": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Request Timeout (seconds)", + "dynamic": false, + "info": "Time in seconds to wait for a response from OpenSearch. 
Increase for large bulk ingestion or complex hybrid queries.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "60" }, "search_query": { "_input_type": "QueryInput", @@ -3028,19 +3005,19 @@ }, "tool_mode": false }, - "selected_output": "dataframe", + "selected_output": "search_results", "showNode": true, "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" }, "dragging": false, "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", "measured": { - "height": 904, + "height": 967, "width": 320 }, "position": { - "x": 387.88180968996585, - "y": 879.9328678310967 + "x": 365.79131713313234, + "y": 767.0259058739476 }, "selected": false, "type": "genericNode" @@ -3060,14 +3037,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -3076,48 +3051,27 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-02T21:24:52.480Z", + "last_updated": "2026-02-27T18:44:05.044Z", "legacy": false, - "lf_version": "1.7.0.dev21", + "lf_version": "1.8.0", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -3149,7 +3103,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "api_base": { @@ -3168,7 +3122,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -3179,23 +3133,23 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "API Key (Optional)", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -3265,7 +3219,7 @@ "show": true, "title_case": false, "type": "code", - "value": 
"from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. \"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n 
max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n 
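# The already-authenticated APIClient is shared by this primary\n                    # instance and by every per-model instance created below, so\n                    # switching models does not trigger a new credential exchange.\n                    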
project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == \"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n 
selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or 
global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. \"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model 
dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n 
model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -3275,6 +3229,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -3287,28 +3242,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3329,7 +3262,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -3351,31 +3284,148 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", - "options": [], - "options_metadata": [], + "options": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } + ], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, "show": true, 
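The hunks above replace the per-provider dropdown and fail_safe_mode toggle with a single ModelInput whose options carry a param_mapping dict in their metadata. A minimal sketch of how such a mapping could translate generic field values into provider-specific constructor kwargs; build_kwargs and the fields dict here are illustrative only, not the lfx implementation:

from typing import Any

# One option shaped like the ModelInput entries in this diff.
option = {
    "name": "text-embedding-3-small",
    "provider": "OpenAI",
    "metadata": {
        "param_mapping": {
            "model": "model",
            "api_key": "api_key",
            "api_base": "base_url",
            "request_timeout": "timeout",
        }
    },
}

def build_kwargs(opt: dict[str, Any], fields: dict[str, Any]) -> dict[str, Any]:
    # Hypothetical helper: map the selected model name through the mapping,
    # then forward only the fields that are set and that the target
    # embedding class actually understands.
    mapping = opt["metadata"]["param_mapping"]
    kwargs: dict[str, Any] = {mapping["model"]: opt["name"]}
    for name, value in fields.items():
        if value is not None and name in mapping:
            kwargs[mapping[name]] = value
    return kwargs

print(build_kwargs(option, {"api_key": "sk-...", "request_timeout": 30.0}))
# -> {'model': 'text-embedding-3-small', 'api_key': 'sk-...', 'timeout': 30.0}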
"title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -3397,45 +3447,19 @@ "type": "dict", "value": {} }, - "ollama_base_url": { + "project_id": { "_input_type": "MessageTextInput", "advanced": false, - "display_name": "Ollama API URL", + "display_name": "Project ID", "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, - "project_id": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Project ID", - "dynamic": false, - "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "project_id", + "load_from_db": false, + "name": "project_id", "override_skip": false, "placeholder": "", "required": false, @@ -3448,49 +3472,6 @@ "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "Ollama" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "Ollama" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -3560,12 +3541,12 @@ "dragging": false, "id": "EmbeddingModel-EzcW6", "measured": { - "height": 369, + "height": 207, "width": 320 }, "position": { - "x": -742.3027218520097, - "y": 1224.367844475079 + "x": -325.4729770185256, + "y": 1151.3746172692154 }, "selected": false, "type": "genericNode" @@ -3585,14 +3566,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - 
"ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -3601,48 +3580,27 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-02T21:24:52.481Z", + "last_updated": "2026-02-27T18:44:06.668Z", "legacy": false, - "lf_version": "1.7.0.dev21", + "lf_version": "1.8.0", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -3674,7 +3632,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" }, "_type": "Component", "api_base": { @@ -3693,7 +3651,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -3704,12 +3662,12 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "IBM watsonx.ai API Key", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, @@ -3720,7 +3678,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "WATSONX_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -3745,7 +3703,7 @@ "placeholder": "", "real_time_refresh": true, "required": false, - "show": true, + "show": false, "title_case": false, "toggle": false, "tool_mode": false, @@ -3790,7 +3748,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n 
display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -3800,6 +3758,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -3812,28 +3771,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3846,7 +3783,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -3854,7 +3791,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -3876,37 +3813,148 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "ibm/granite-embedding-278m-multilingual", - "ibm/slate-125m-english-rtrvr-v2", - "ibm/slate-30m-english-rtrvr-v2", - "intfloat/multilingual-e5-large", - "sentence-transformers/all-minilm-l6-v2" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + 
"metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "ibm/granite-embedding-278m-multilingual" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -3928,32 +3976,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -3970,7 +3992,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -3979,49 +4001,6 @@ "type": "str", "value": "WATSONX_PROJECT_ID" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "WatsonxAI" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "IBM watsonx.ai" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -4074,7 +4053,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -4091,7 +4070,7 @@ "dragging": false, "id": "EmbeddingModel-cONxU", "measured": { - "height": 534, + "height": 207, "width": 320 }, "position": { @@ -4100,23 +4079,773 @@ }, "selected": false, "type": "genericNode" + }, + { + "data": { + "id": 
"TextInput-BG2U3", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-02-27T18:44:23.265Z", + "legacy": false, + "lf_version": "1.8.0", + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). 
Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" + }, + "input_value": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "multiline": false, + "name": "input_value", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OPENRAG-QUERY-FILTER" + }, + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "TextInput" + }, + "dragging": false, + "id": "TextInput-BG2U3", + "measured": { + "height": 207, + "width": 320 + }, + "position": { + "x": -326.2359962261157, + "y": 1657.3533165363156 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "ParserComponent-nLvsY", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Extracts text using a template.", + "display_name": "Parser", + "documentation": "https://docs.langflow.org/parser", + "edited": false, + "field_order": [ + "input_data", + "mode", + "pattern", + "sep" + ], + "frozen": false, + "icon": "braces", + "last_updated": "2026-02-27T20:10:40.472Z", + "legacy": false, + "metadata": { + "code_hash": "3cda25c3f7b5", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.processing.parser.ParserComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Parsed Text", + "group_outputs": false, + "loop_types": null, + "method": "parse_combined_text", + "name": "parsed_text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + 
"template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" + }, + "_type": "Component", + "clean_data": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Clean Data", + "dynamic": false, + "info": "Enable to clean the data by removing empty rows and lines in each cell of the DataFrame/ Data object.", + "list": false, + "list_add_label": "Add More", + "name": "clean_data", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = 
self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + }, + "input_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Data or DataFrame", + "dynamic": false, + "info": "Accepts either a DataFrame or a Data object.", + "input_types": [ + "DataFrame", + "Data" + ], + "list": false, + "list_add_label": "Add More", + "name": "input_data", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "mode": { + "_input_type": "TabInput", + "advanced": false, + "display_name": "Mode", + "dynamic": false, + "info": "Convert into raw string instead of using a template.", + "name": "mode", + "options": [ + "Parser", + "Stringify" + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "tab", + "value": "Stringify" + }, + "pattern": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Template", + "dynamic": true, + "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + 
"multiline": true, + "name": "pattern", + "override_skip": false, + "password": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "{text}" + }, + "sep": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Separator", + "dynamic": false, + "info": "String used to separate rows/items.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "sep", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "\n" + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "ParserComponent" + }, + "dragging": false, + "id": "ParserComponent-nLvsY", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 721.56879231078, + "y": 1356.0976800693716 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TypeConverterComponent-AM0BE", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Convert between different types (Message, Data, DataFrame)", + "display_name": "Type Convert", + "documentation": "https://docs.langflow.org/type-convert", + "edited": false, + "field_order": [ + "input_data", + "auto_parse", + "output_type" + ], + "frozen": false, + "icon": "repeat", + "last_updated": "2026-02-27T20:09:31.009Z", + "legacy": false, + "metadata": { + "code_hash": "be7797f8df1c", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + }, + { + "name": "pandas", + "version": "2.2.3" + } + ], + "total_dependencies": 2 + }, + "module": "lfx.components.processing.converter.TypeConverterComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "DataFrame Output", + "group_outputs": false, + "hidden": null, + "loop_types": null, + "method": "convert_to_dataframe", + "name": "dataframe_output", + "options": null, + "required_inputs": null, + "selected": "DataFrame", + "tool_mode": true, + "types": [ + "DataFrame" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "bbfbd352-e931-4894-afe8-6552a3f0cc2c" + }, + "_type": "Component", + "auto_parse": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Auto Parse", + "dynamic": false, + "info": "Detect and convert JSON/CSV strings automatically.", + "list": false, + "list_add_label": "Add More", + "name": "auto_parse", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import json\nfrom typing import Any\n\nfrom lfx.custom import 
Component\nfrom lfx.io import BoolInput, HandleInput, Output, TabInput\nfrom lfx.schema import Data, DataFrame, Message\n\nMIN_CSV_LINES = 2\n\n\ndef convert_to_message(v) -> Message:\n \"\"\"Convert input to Message type.\n\n Args:\n v: Input to convert (Message, Data, DataFrame, or dict)\n\n Returns:\n Message: Converted Message object\n \"\"\"\n return v if isinstance(v, Message) else v.to_message()\n\n\ndef convert_to_data(v: DataFrame | Data | Message | dict, *, auto_parse: bool) -> Data:\n \"\"\"Convert input to Data type.\n\n Args:\n v: Input to convert (Message, Data, DataFrame, or dict)\n auto_parse: Enable automatic parsing of structured data (JSON/CSV)\n\n Returns:\n Data: Converted Data object\n \"\"\"\n if isinstance(v, dict):\n return Data(v)\n if isinstance(v, Message):\n data = Data(data={\"text\": v.data[\"text\"]})\n return parse_structured_data(data) if auto_parse else data\n\n return v if isinstance(v, Data) else v.to_data()\n\n\ndef convert_to_dataframe(v: DataFrame | Data | Message | dict, *, auto_parse: bool) -> DataFrame:\n \"\"\"Convert input to DataFrame type.\n\n Args:\n v: Input to convert (Message, Data, DataFrame, or dict)\n auto_parse: Enable automatic parsing of structured data (JSON/CSV)\n\n Returns:\n DataFrame: Converted DataFrame object\n \"\"\"\n import pandas as pd\n\n if isinstance(v, dict):\n return DataFrame([v])\n if isinstance(v, DataFrame):\n return v\n # Handle pandas DataFrame\n if isinstance(v, pd.DataFrame):\n # Convert pandas DataFrame to our DataFrame by creating Data objects\n return DataFrame(data=v)\n\n if isinstance(v, Message):\n data = Data(data={\"text\": v.data[\"text\"]})\n return parse_structured_data(data).to_dataframe() if auto_parse else data.to_dataframe()\n # For other types, call to_dataframe method\n return v.to_dataframe()\n\n\ndef parse_structured_data(data: Data) -> Data:\n \"\"\"Parse structured data (JSON, CSV) from Data's text field.\n\n Args:\n data: Data object with text content to parse\n\n Returns:\n Data: Modified Data object with parsed content or original if parsing fails\n \"\"\"\n raw_text = data.get_text() or \"\"\n text = raw_text.lstrip(\"\\ufeff\").strip()\n\n # Try JSON parsing first\n parsed_json = _try_parse_json(text)\n if parsed_json is not None:\n return parsed_json\n\n # Try CSV parsing\n if _looks_like_csv(text):\n try:\n return _parse_csv_to_data(text)\n except Exception: # noqa: BLE001\n # Heuristic misfire or malformed CSV — keep original data\n return data\n\n # Return original data if no parsing succeeded\n return data\n\n\ndef _try_parse_json(text: str) -> Data | None:\n \"\"\"Try to parse text as JSON and return Data object.\"\"\"\n try:\n parsed = json.loads(text)\n\n if isinstance(parsed, dict):\n # Single JSON object\n return Data(data=parsed)\n if isinstance(parsed, list) and all(isinstance(item, dict) for item in parsed):\n # Array of JSON objects - create Data with the list\n return Data(data={\"records\": parsed})\n\n except (json.JSONDecodeError, ValueError):\n pass\n\n return None\n\n\ndef _looks_like_csv(text: str) -> bool:\n \"\"\"Simple heuristic to detect CSV content.\"\"\"\n lines = text.strip().split(\"\\n\")\n if len(lines) < MIN_CSV_LINES:\n return False\n\n header_line = lines[0]\n return \",\" in header_line and len(lines) > 1\n\n\ndef _parse_csv_to_data(text: str) -> Data:\n \"\"\"Parse CSV text and return Data object.\"\"\"\n from io import StringIO\n\n import pandas as pd\n\n # Parse CSV to DataFrame, then convert to list of dicts\n parsed_df = 
pd.read_csv(StringIO(text))\n records = parsed_df.to_dict(orient=\"records\")\n\n return Data(data={\"records\": records})\n\n\nclass TypeConverterComponent(Component):\n display_name = \"Type Convert\"\n description = \"Convert between different types (Message, Data, DataFrame)\"\n documentation: str = \"https://docs.langflow.org/type-convert\"\n icon = \"repeat\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Input\",\n input_types=[\"Message\", \"Data\", \"DataFrame\"],\n info=\"Accept Message, Data or DataFrame as input\",\n required=True,\n ),\n BoolInput(\n name=\"auto_parse\",\n display_name=\"Auto Parse\",\n info=\"Detect and convert JSON/CSV strings automatically.\",\n advanced=True,\n value=False,\n required=False,\n ),\n TabInput(\n name=\"output_type\",\n display_name=\"Output Type\",\n options=[\"Message\", \"Data\", \"DataFrame\"],\n info=\"Select the desired output data type\",\n real_time_refresh=True,\n value=\"Message\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Message Output\",\n name=\"message_output\",\n method=\"convert_to_message\",\n )\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"output_type\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n\n # Add only the selected output type\n if field_value == \"Message\":\n frontend_node[\"outputs\"].append(\n Output(\n display_name=\"Message Output\",\n name=\"message_output\",\n method=\"convert_to_message\",\n ).to_dict()\n )\n elif field_value == \"Data\":\n frontend_node[\"outputs\"].append(\n Output(\n display_name=\"Data Output\",\n name=\"data_output\",\n method=\"convert_to_data\",\n ).to_dict()\n )\n elif field_value == \"DataFrame\":\n frontend_node[\"outputs\"].append(\n Output(\n display_name=\"DataFrame Output\",\n name=\"dataframe_output\",\n method=\"convert_to_dataframe\",\n ).to_dict()\n )\n\n return frontend_node\n\n def convert_to_message(self) -> Message:\n \"\"\"Convert input to Message type.\"\"\"\n input_value = self.input_data[0] if isinstance(self.input_data, list) else self.input_data\n\n # Handle string input by converting to Message first\n if isinstance(input_value, str):\n input_value = Message(text=input_value)\n\n result = convert_to_message(input_value)\n self.status = result\n return result\n\n def convert_to_data(self) -> Data:\n \"\"\"Convert input to Data type.\"\"\"\n input_value = self.input_data[0] if isinstance(self.input_data, list) else self.input_data\n\n # Handle string input by converting to Message first\n if isinstance(input_value, str):\n input_value = Message(text=input_value)\n\n result = convert_to_data(input_value, auto_parse=self.auto_parse)\n self.status = result\n return result\n\n def convert_to_dataframe(self) -> DataFrame:\n \"\"\"Convert input to DataFrame type.\"\"\"\n input_value = self.input_data[0] if isinstance(self.input_data, list) else self.input_data\n\n # Handle string input by converting to Message first\n if isinstance(input_value, str):\n input_value = Message(text=input_value)\n\n result = convert_to_dataframe(input_value, auto_parse=self.auto_parse)\n self.status = result\n return result\n" + }, + "input_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Input", + "dynamic": false, + "info": "Accept Message, Data or DataFrame as input", + "input_types": [ + "Message", + "Data", + "DataFrame" + ], + "list": 
false, + "list_add_label": "Add More", + "name": "input_data", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "output_type": { + "_input_type": "TabInput", + "advanced": false, + "display_name": "Output Type", + "dynamic": false, + "info": "Select the desired output data type", + "name": "output_type", + "options": [ + "Message", + "Data", + "DataFrame" + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "tab", + "value": "DataFrame" + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "TypeConverterComponent" + }, + "dragging": false, + "id": "TypeConverterComponent-AM0BE", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 897.4779808982449, + "y": 1461.3901362355743 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "ParserComponent-x6BS7", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Extracts text using a template.", + "display_name": "Parser", + "documentation": "https://docs.langflow.org/parser", + "edited": false, + "field_order": [ + "input_data", + "mode", + "pattern", + "sep" + ], + "frozen": false, + "icon": "braces", + "legacy": false, + "metadata": { + "code_hash": "3cda25c3f7b5", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.processing.parser.ParserComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Parsed Text", + "group_outputs": false, + "method": "parse_combined_text", + "name": "parsed_text", + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n 
display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + }, + "input_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Data or DataFrame", + "dynamic": false, + "info": "Accepts either a DataFrame or a Data object.", + "input_types": [ + "DataFrame", + "Data" + ], + "list": false, + "list_add_label": "Add More", + "name": "input_data", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "mode": { + "_input_type": "TabInput", + "advanced": false, + "display_name": "Mode", + "dynamic": false, + "info": "Convert into raw string instead of using a template.", + "name": "mode", + "options": [ + "Parser", + "Stringify" + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "tab", + "value": "Parser" + }, + "pattern": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Template", + "dynamic": true, + "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "pattern", + "override_skip": false, + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "{text}" + }, + "sep": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Separator", + "dynamic": false, + "info": "String used to separate rows/items.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "sep", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + 
"title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "\n" + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "ParserComponent" + }, + "dragging": false, + "id": "ParserComponent-x6BS7", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 1069.3164581772887, + "y": 1357.8654452412288 + }, + "selected": false, + "type": "genericNode" } ], "viewport": { - "x": 470.8619752966712, - "y": -133.63346875696777, - "zoom": 0.36224591795587874 + "x": 392.9736430078082, + "y": 12.763120500193224, + "zoom": 0.34590735780401993 } }, "description": "OpenRAG OpenSearch Nudges generator, based on the OpenSearch documents and the chat history.", "endpoint_name": null, "id": "ebc01d31-1976-46ce-a385-b0240327226c", "is_component": false, + "last_tested_version": "1.8.0", "locked": true, - "last_tested_version": "1.7.0.dev21", "name": "OpenRAG OpenSearch Nudges Flow", "tags": [ "assistants", "agents" ] -} +} \ No newline at end of file diff --git a/flows/openrag_url_mcp.json b/flows/openrag_url_mcp.json index e01cd2db9..d2235b2a5 100644 --- a/flows/openrag_url_mcp.json +++ b/flows/openrag_url_mcp.json @@ -173,122 +173,6 @@ "target": "ChatOutput-0XHyo", "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-0XHyoœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-lr9k6", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_connector_type", - "id": "AdvancedDynamicFormBuilder-ziCu4", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-lr9k6{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-lr9k6œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-lr9k6", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-lr9k6œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-ziCu4", - "targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-KYwsB", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_owner", - "id": "AdvancedDynamicFormBuilder-ziCu4", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-KYwsB{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-KYwsBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-KYwsB", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-KYwsBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-ziCu4", - "targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": 
"SecretInput-pYHMH", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_owner_email", - "id": "AdvancedDynamicFormBuilder-ziCu4", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-pYHMH{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-pYHMHœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-pYHMH", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-pYHMHœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-ziCu4", - "targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SecretInput", - "id": "SecretInput-aoBVB", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "dynamic_owner_name", - "id": "AdvancedDynamicFormBuilder-ziCu4", - "inputTypes": [ - "Text", - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__SecretInput-aoBVB{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-aoBVBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "SecretInput-aoBVB", - "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-aoBVBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "AdvancedDynamicFormBuilder-ziCu4", - "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -514,6 +398,118 @@ "sourceHandle": "{œdataTypeœ:œParserComponentœ,œidœ:œParserComponent-1eim1œ,œnameœ:œparsed_textœ,œoutput_typesœ:[œMessageœ]}", "target": "DataFrameOperations-hqIoy", "targetHandle": "{œfieldNameœ:œnew_column_valueœ,œidœ:œDataFrameOperations-hqIoyœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-tahMb", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner_name", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-tahMb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-tahMbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-tahMb", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-tahMbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-Rm9YX", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner_email", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + 
"type": "str" + } + }, + "id": "xy-edge__TextInput-Rm9YX{œdataTypeœ:œTextInputœ,œidœ:œTextInput-Rm9YXœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-Rm9YX", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-Rm9YXœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-G5CiZ", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-G5CiZ{œdataTypeœ:œTextInputœ,œidœ:œTextInput-G5CiZœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-G5CiZ", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-G5CiZœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-OPyEf", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_connector_type", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-OPyEf{œdataTypeœ:œTextInputœ,œidœ:œTextInput-OPyEfœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-OPyEf", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-OPyEfœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" } ], "nodes": [ @@ -531,27 +527,28 @@ "custom_fields": {}, "description": "Split text into chunks based on specified criteria.", "display_name": "Split Text", - "documentation": "https://docs.langflow.org/components-processing#split-text", - "edited": true, + "documentation": "https://docs.langflow.org/split-text", + "edited": false, "field_order": [ "data_inputs", "chunk_overlap", "chunk_size", "separator", "text_key", - "keep_separator" + "keep_separator", + "clean_output" ], "frozen": false, "icon": "scissors-line-dashed", "legacy": false, - "lf_version": "1.6.0", + "lf_version": "1.8.0", "metadata": { - "code_hash": "f2867efda61f", + "code_hash": "29ae597d2d86", "dependencies": { "dependencies": [ { "name": "langchain_text_splitters", - "version": "0.3.9" + "version": "0.3.11" }, { "name": "lfx", @@ -571,6 +568,7 @@ "display_name": "Chunks", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "split_text", "name": 
"dataframe", "options": null, @@ -595,12 +593,14 @@ "list": false, "list_add_label": "Add More", "name": "chunk_overlap", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 200 }, @@ -613,15 +613,37 @@ "list": false, "list_add_label": "Add More", "name": "chunk_size", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1000 }, + "clean_output": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Clean Output", + "dynamic": false, + "info": "When enabled, only the text column is included in the output. Metadata columns are removed.", + "list": false, + "list_add_label": "Add More", + "name": "clean_output", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, "code": { "advanced": true, "dynamic": true, @@ -638,7 +660,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . 
for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n" + "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split 
in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_output\",\n display_name=\"Clean Output\",\n info=\"When enabled, only the text column is included in the output. Metadata columns are removed.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs, *, clean: bool = False) -> list[Data]:\n return [\n Data(text=doc.page_content) if clean else Data(text=doc.page_content, data=doc.metadata) for doc in docs\n ]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return 
splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n docs = self.split_text_base()\n df = DataFrame(self._docs_to_data(docs, clean=self.clean_output))\n return df if self.clean_output else df.smart_column_order()\n" }, "data_inputs": { "_input_type": "HandleInput", @@ -654,11 +676,13 @@ "list": false, "list_add_label": "Add More", "name": "data_inputs", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -679,6 +703,7 @@ "End" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -686,6 +711,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "False" }, @@ -702,6 +728,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "separator", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -709,6 +736,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "\n" }, @@ -725,6 +753,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "text_key", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -732,6 +761,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "text" } @@ -749,8 +779,8 @@ "width": 320 }, "position": { - "x": 2299.485091096586, - "y": 1430.4506304359015 + "x": 2342.7091324997477, + "y": 1447.0585015368276 }, "positionAbsolute": { "x": 1683.4543896546102, @@ -791,7 +821,7 @@ "frozen": false, "icon": "layout-template", "legacy": false, - "lf_version": "1.7.0.dev21", + "lf_version": "1.8.0", "metadata": { "code_hash": "4c72ce0f2e34", "dependencies": { @@ -994,12 +1024,14 @@ { "description": "Header name", "display_name": "Header", + "formatter": "text", "name": "key", "type": "str" }, { "description": "Header value", "display_name": "Value", + "formatter": "text", "name": "value", "type": "str" } @@ -1132,7 +1164,7 @@ "dragging": false, "id": "URLComponent-lnA0q", "measured": { - "height": 292, + "height": 295, "width": 320 }, "position": { @@ -1176,6 +1208,7 @@ "icon": "table", "last_updated": "2025-12-12T20:28:32.647Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { "code_hash": "904f4eaebccd", "dependencies": { @@ -1582,12 +1615,12 @@ "dragging": false, "id": "DataFrameOperations-hqIoy", "measured": { - "height": 399, + "height": 401, "width": 320 }, "position": { - "x": 1578.348410746631, - "y": 1137.0951737512514 + "x": 1571.6992954806653, + "y": 1440.2579105342722 }, "selected": false, "type": "genericNode" @@ -1626,6 +1659,7 @@ "icon": "table", "last_updated": "2025-12-12T20:28:32.648Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { "code_hash": "904f4eaebccd", "dependencies": { @@ -2032,12 +2066,12 @@ "dragging": false, "id": "DataFrameOperations-A98BL", "measured": { - "height": 399, + "height": 401, "width": 320 }, "position": { - "x": 1946.8185577395595, - "y": 1432.2126327108165 + "x": 1948.5216514348, + "y": 1447.7742340282566 }, "selected": false, "type": "genericNode" @@ -2076,6 +2110,7 @@ "icon": "table", "last_updated": "2025-12-12T20:28:32.648Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { "code_hash": "904f4eaebccd", 
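The hunk above replaces the bundled Split Text component code with a version that adds a clean_output toggle: _docs_to_data gains a keyword-only clean flag that drops per-document metadata, and split_text only applies smart_column_order() when the metadata columns are kept. A minimal sketch of that branching, using a hypothetical Document stand-in instead of the real lfx Data/DataFrame classes:

from dataclasses import dataclass, field


@dataclass
class Document:
    # stand-in for the LangChain-style documents produced by split_text_base()
    page_content: str
    metadata: dict = field(default_factory=dict)


def docs_to_rows(docs: list[Document], *, clean: bool = False) -> list[dict]:
    # clean=True keeps only the text column; clean=False carries metadata through
    if clean:
        return [{"text": d.page_content} for d in docs]
    return [{"text": d.page_content, **d.metadata} for d in docs]


chunks = [
    Document("first chunk", {"source": "a.txt"}),
    Document("second chunk", {"source": "a.txt"}),
]
print(docs_to_rows(chunks, clean=True))   # [{'text': 'first chunk'}, {'text': 'second chunk'}]
print(docs_to_rows(chunks, clean=False))  # rows also carry the 'source' metadata column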
"dependencies": { @@ -2482,12 +2517,12 @@ "dragging": false, "id": "DataFrameOperations-RhKoe", "measured": { - "height": 317, + "height": 319, "width": 320 }, "position": { - "x": 3227.5026887437266, - "y": 1418.705251721416 + "x": 3160.147225163303, + "y": 1517.6296872526634 }, "selected": false, "type": "genericNode" @@ -2520,7 +2555,7 @@ "frozen": false, "icon": "MessagesSquare", "legacy": false, - "lf_version": "1.7.0.dev21", + "lf_version": "1.8.0", "metadata": { "code_hash": "7a26c54d89ed", "dependencies": { @@ -2780,7 +2815,7 @@ "dragging": false, "id": "ChatInput-sskrk", "measured": { - "height": 204, + "height": 207, "width": 320 }, "position": { @@ -2819,8 +2854,9 @@ "frozen": false, "icon": "MessagesSquare", "legacy": false, + "lf_version": "1.8.0", "metadata": { - "code_hash": "cae45e2d53f6", + "code_hash": "8c87e536cca4", "dependencies": { "dependencies": [ { @@ -2829,7 +2865,7 @@ }, { "name": "fastapi", - "version": "0.120.0" + "version": "0.133.1" }, { "name": "lfx", @@ -2900,7 +2936,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n 
clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n # Preserve existing session_id from the incoming message if it exists\n existing_session_id = message.session_id\n else:\n message = Message(text=text)\n existing_session_id = None\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n # Preserve session_id from incoming message, or use component/graph session_id\n message.session_id = (\n self.session_id or existing_session_id or (self.graph.session_id if hasattr(self, \"graph\") else None) or \"\"\n )\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or 
DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -3082,12 +3118,12 @@ "dragging": false, "id": "ChatOutput-0XHyo", "measured": { - "height": 48, + "height": 52, "width": 192 }, "position": { - "x": 3823.513108139744, - "y": 1571.0417919511297 + "x": 3588.211427790188, + "y": 1759.283136230774 }, "selected": false, "type": "genericNode" @@ -3107,14 +3143,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -3123,47 +3157,27 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-12T20:28:32.527Z", + "last_updated": "2026-02-27T18:48:50.338Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -3195,12 +3209,12 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "2bee9dd9-f030-469f-a568-6fcb3a6e7140" + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": false, + "advanced": true, "display_name": "API Base URL", "dynamic": false, "info": "Base URL for the API. 
Leave empty for default.", @@ -3209,7 +3223,7 @@ ], "list": false, "list_add_label": "Add More", - "load_from_db": true, + "load_from_db": false, "name": "api_base", "override_skip": false, "placeholder": "", @@ -3221,27 +3235,27 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "OLLAMA_BASE_URL" + "value": "" }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "API Key (Optional)", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -3311,7 +3325,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -3321,6 +3335,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -3333,27 +3348,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3374,7 +3368,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -3396,34 +3390,148 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "embeddinggemma:latest", - "mxbai-embed-large:latest", - "nomic-embed-text:latest" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, + "required": true, 
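The model field above changes from a plain DropdownInput to a ModelInput whose options are dicts carrying provider metadata, including a param_mapping that tells the rewritten component which constructor argument each generic field feeds (e.g. request_timeout becomes timeout for OpenAIEmbeddings). A simplified sketch of that lookup with hypothetical component values; the real _build_kwargs in the new code additionally handles watsonx- and Ollama-specific keys:

selected_model = {
    "name": "text-embedding-3-small",
    "provider": "OpenAI",
    "metadata": {
        "param_mapping": {
            "model": "model",
            "api_key": "api_key",
            "api_base": "base_url",
            "request_timeout": "timeout",
        }
    },
}

# hypothetical field values as they would sit on the component instance
component_values = {"api_key": "sk-...", "api_base": None, "request_timeout": 30.0}

mapping = selected_model["metadata"]["param_mapping"]
kwargs = {mapping["model"]: selected_model["name"]}
for field_name, ctor_arg in mapping.items():
    value = component_values.get(field_name)
    if value is not None:
        kwargs[ctor_arg] = value

print(kwargs)  # {'model': 'text-embedding-3-small', 'api_key': 'sk-...', 'timeout': 30.0}
# The kwargs then feed the class named by metadata["embedding_class"],
# e.g. OpenAIEmbeddings(**kwargs).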
"show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "embeddinggemma:latest" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -3445,32 +3553,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -3496,45 +3578,6 @@ "type": "str", "value": "" }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "Ollama" - }, "request_timeout": { "_input_type": "FloatInput", "advanced": true, @@ -3604,32 +3647,33 @@ "dragging": false, "id": "EmbeddingModel-XjV5v", "measured": { - "height": 451, + "height": 207, "width": 320 }, "position": { "x": 2066.3681917820168, - "y": 2053.0594731518368 + "y": 2060.330493367782 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", "node": { "base_classes": [ "Data", - "DataFrame", "VectorStore" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. 
To search use the tools search_documents and raw_search. Search documents takes a query for vector search, for example\n {search_query: \"components in openrag\"}", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": true, + "edited": false, "field_order": [ "docs_metadata", "opensearch_url", @@ -3654,14 +3698,17 @@ "jwt_header", "bearer_prefix", "use_ssl", - "verify_certs" + "verify_certs", + "request_timeout", + "max_retries" ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T05:27:07.589Z", + "last_updated": "2026-02-27T18:47:41.579Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { - "code_hash": "000397b17863", + "code_hash": "6a3df45b55c5", "dependencies": { "dependencies": [ { @@ -3670,10 +3717,14 @@ }, { "name": "lfx", - "version": "0.2.0.dev21" + "version": null + }, + { + "name": "tenacity", + "version": "8.5.0" } ], - "total_dependencies": 2 + "total_dependencies": 3 }, "module": "custom_components.opensearch_multimodel_multiembedding" }, @@ -3701,18 +3752,18 @@ { "allows_loop": false, "cache": true, - "display_name": "DataFrame", + "display_name": "Raw Search", "group_outputs": false, "hidden": null, "loop_types": null, - "method": "as_dataframe", - "name": "dataframe", + "method": "raw_search", + "name": "raw_search", "options": null, "required_inputs": null, - "selected": "DataFrame", + "selected": "Data", "tool_mode": true, "types": [ - "DataFrame" + "Data" ], "value": "__UNDEFINED__" }, @@ -3741,7 +3792,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "2bef1fdd-4d60-4bb6-8fd2-c0a3eae09d1e" }, "_type": "Component", "auth_mode": { @@ -3809,7 +3860,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\n\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n 
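A couple of worked examples for normalize_model_name as defined above, using model names that appear elsewhere in this flow:

    assert normalize_model_name("text-embedding-3-small") == "text_embedding_3_small"
    assert normalize_model_name("nomic-embed-text:latest") == "nomic_embed_text_latest"
    # get_embedding_field_name prefixes the result, e.g.
    # "chunk_embedding_text_embedding_3_small"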
Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. \"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # DictInput(name=\"query\", display_name=\"Query\", input_types=[\"Data\"], is_list=False, tool_mode=True),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n if isinstance(raw_query, str):\n raw_query = json.loads(raw_query)\n client = self.build_client()\n logger.info(f\"query: {raw_query}\")\n resp = client.search(\n index=self.index_name,\n body=raw_query,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return 
str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Note: Some OpenSearch versions/configurations have issues with dynamically adding\n knn_vector mappings (NullPointerException). 
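For reference, a query DSL payload one might hand to raw_search above, either as a dict or a JSON string (values are illustrative); embedding-vector fields are stripped from each hit's _source before the response is returned:

    raw_query = {
        "size": 5,
        "query": {"match": {"text": "components in openrag"}},
    }
    # hypothetical invocation: component.raw_search(json.dumps(raw_query))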
This method checks if the field\n already exists before attempting to add it, and gracefully skips if the field\n is already properly configured.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n # First, check if the field already exists and is properly mapped\n properties = self._get_index_properties(client)\n if self._is_knn_vector_field(properties, field_name):\n # Field already exists as knn_vector - verify dimensions match\n existing_dim = self._get_field_dimension(properties, field_name)\n if existing_dim is not None and existing_dim != dim:\n logger.warning(\n f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n f\"but current embedding has dimension {dim}. Using existing mapping.\"\n )\n else:\n logger.info(\n f\"[OpenSearchMultimodal] Field '{field_name}' already exists \"\n f\"as knn_vector with matching dimensions - skipping mapping update\"\n )\n return\n\n # Field doesn't exist, try to add the mapping\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n # Check if this is the known OpenSearch k-NN NullPointerException issue\n error_str = str(e).lower()\n if \"null\" in error_str or \"nullpointerexception\" in error_str:\n logger.warning(\n f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name} \"\n f\"due to OpenSearch k-NN plugin issue: {e}. \"\n f\"This is a known issue with some OpenSearch versions. \"\n f\"Skipping mapping update. \"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodal] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. 
This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodal] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, 
max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodal] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodal] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodal] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodal][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodal][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise 
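_build_auth_kwargs above yields one of two shapes, both splatted into the OpenSearch(...) constructor in build_client. A sketch with illustrative values:

    basic_kwargs = {"http_auth": ("admin", "admin")}
    jwt_kwargs = {"headers": {"Authorization": "Bearer <jwt_token>"}}  # bearer_prefix=True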
ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodal][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodal][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = 
getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" 
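A small illustration of the identifier matching above, using the duplicate-deployment example from the embedding_model_name help text:

    # An embedding exposing deployment and model gets several lookup keys,
    # including the combined 'deployment:model' form used to disambiguate.
    deployment, model = "text-embedding-ada-002", "text-embedding-3-large"
    possible_names = [deployment, model]
    if deployment != model:
        possible_names.append(f"{deployment}:{model}")
    # possible_names == ["text-embedding-ada-002", "text-embedding-3-large",
    #                    "text-embedding-ada-002:text-embedding-3-large"]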
values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for 
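A self-contained demo of the tenacity pattern used above: retry only on errors classified as rate limits, with exponential backoff (the flaky_call helper and the tiny wait values are purely illustrative):

    from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential

    def is_rate_limit_error(exc: Exception) -> bool:
        return "429" in str(exc).lower()

    attempts = {"n": 0}

    @retry(
        retry=retry_if_exception(is_rate_limit_error),
        stop=stop_after_attempt(5),
        wait=wait_exponential(multiplier=0.01, min=0.01, max=0.05),  # tiny waits for the demo
        reraise=True,
    )
    def flaky_call() -> str:
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise RuntimeError("HTTP 429: rate limit exceeded")  # retried
        return "ok"

    assert flaky_call() == "ok" and attempts["n"] == 3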
non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n 
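A minimal sketch of the fan-out pattern above: submit chunks to a thread pool, then write each result back into its original slot so output order matches input order (fake_embed stands in for embed_chunk_with_retry):

    from concurrent.futures import ThreadPoolExecutor, as_completed

    texts = ["alpha", "beta", "gamma"]
    results = [None] * len(texts)

    def fake_embed(text: str) -> list[float]:
        # Stand-in for the real embedding call; returns a dummy vector.
        return [float(len(text))]

    with ThreadPoolExecutor(max_workers=2) as executor:
        futures = {executor.submit(fake_embed, t): i for i, t in enumerate(texts)}
        for future in as_completed(futures):
            results[futures[future]] = future.result()

    assert results == [[5.0], [4.0], [5.0]]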
Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
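Worked examples of the two accepted filter formats and the clauses they coerce to (standalone sketch; the real conversion is _coerce_filter_clauses above):

    explicit = {"filter": [{"term": {"filename": "doc.pdf"}},
                           {"terms": {"owner": ["user1", "user2"]}}],
                "limit": 10, "score_threshold": 1.6}
    # -> [{"term": {"filename": "doc.pdf"}}, {"terms": {"owner": ["user1", "user2"]}}]

    context_style = {"data_sources": ["file.pdf"], "owners": ["user123"], "document_types": []}
    # -> [{"term": {"filename": "file.pdf"}},
    #     {"term": {"owner": "user123"}},
    #     {"term": {"mimetype": "__IMPOSSIBLE_VALUE__"}}]  # empty list matches nothing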
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename.keyword\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n 
}\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n Output,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\nREQUEST_TIMEOUT = 60\nMAX_RETRIES = 5\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search. \"\n \"To search use the tools search_documents and raw_search. \"\n \"Search documents takes a query for vector search, for example\\n\"\n ' {search_query: \"components in openrag\"}'\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n \"request_timeout\",\n \"max_retries\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"nmslib\", \"faiss\", \"lucene\", \"jvector\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'nmslib' works with standard \"\n \"OpenSearch. 'jvector' requires OpenSearch 2.9+. 
'lucene' requires index.knn: true. \"\n \"Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n # ----- Timeout / Retry -----\n StrInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout (seconds)\",\n value=\"60\",\n advanced=True,\n info=(\n \"Time in seconds to wait for a response from OpenSearch. 
\"\n \"Increase for large bulk ingestion or complex hybrid queries.\"\n ),\n ),\n StrInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n value=\"3\",\n advanced=True,\n info=\"Number of retries for failed connections before raising an error.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Search Results\",\n name=\"search_results\",\n method=\"search_documents\",\n ),\n Output(display_name=\"Raw Search\", name=\"raw_search\", method=\"raw_search\"),\n ]\n\n def raw_search(self, query: str | dict | None = None) -> Data:\n \"\"\"Execute a raw OpenSearch query against the target index.\n\n Args:\n query (dict[str, Any]): The OpenSearch query DSL dictionary.\n\n Returns:\n Data: Search results as a Data object.\n\n Raises:\n ValueError: If 'query' is not a valid OpenSearch query (must be a non-empty dict).\n \"\"\"\n raw_query = query if query is not None else self.search_query\n\n if raw_query is None or (isinstance(raw_query, str) and not raw_query.strip()):\n self.log(\"No query provided for raw search - returning empty results\")\n return Data(data={})\n\n if isinstance(raw_query, dict):\n query_body = raw_query\n elif isinstance(raw_query, str):\n s = raw_query.strip()\n\n # First, optimistically try to parse as JSON DSL\n try:\n query_body = json.loads(s)\n except json.JSONDecodeError:\n # Fallback: treat as a basic text query over common fields\n query_body = {\n \"query\": {\n \"multi_match\": {\n \"query\": s,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n }\n }\n }\n else:\n msg = f\"Unsupported raw_search query type: {type(raw_query)!r}\"\n raise TypeError(msg)\n\n client = self.build_client()\n logger.info(f\"query: {query_body}\")\n resp = client.search(\n index=self.index_name,\n body=query_body,\n params={\"terminate_after\": 0},\n )\n # Remove any _source keys whose value is a list of floats (embedding vectors)\n # Minimum length threshold to identify embedding vectors\n min_vector_length = 100\n\n def is_vector(val):\n # Accepts if it's a list of numbers (float or int) and has reasonable vector length\n return (\n isinstance(val, list) and len(val) > min_vector_length and all(isinstance(x, (float, int)) for x in val)\n )\n\n if \"hits\" in resp and \"hits\" in resp[\"hits\"]:\n for hit in resp[\"hits\"][\"hits\"]:\n source = hit.get(\"_source\")\n if isinstance(source, dict):\n keys_to_remove = [k for k, v in source.items() if is_vector(v)]\n for k in keys_to_remove:\n source.pop(k)\n logger.info(f\"Raw search response (all embedding vectors removed): {resp}\")\n return Data(**resp)\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return 
str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n 
m: int,\n    ) -> None:\n        \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n        This allows adding new embedding models without recreating the entire index.\n        Also ensures the embedding_model tracking field exists.\n\n        Note: Some OpenSearch versions/configurations have issues with dynamically adding\n        knn_vector mappings (NullPointerException). This method checks if the field\n        already exists before attempting to add it, and gracefully skips if the field\n        is already properly configured.\n\n        Args:\n            client: OpenSearch client instance\n            index_name: Target index name\n            field_name: Dynamic field name for this embedding model\n            dim: Vector dimensionality\n            engine: Vector search engine\n            space_type: Distance metric\n            ef_construction: Construction parameter\n            m: HNSW parameter\n        \"\"\"\n        # First, check if the field already exists and is properly mapped\n        properties = self._get_index_properties(client)\n        if self._is_knn_vector_field(properties, field_name):\n            # Field already exists as knn_vector - verify dimensions match\n            existing_dim = self._get_field_dimension(properties, field_name)\n            if existing_dim is not None and existing_dim != dim:\n                logger.warning(\n                    f\"Field '{field_name}' exists with dimension {existing_dim}, \"\n                    f\"but current embedding has dimension {dim}. Using existing mapping.\"\n                )\n            else:\n                logger.info(\n                    f\"[OpenSearchMultimodel] Field '{field_name}' already exists \"\n                    f\"as knn_vector with matching dimensions - skipping mapping update\"\n                )\n            return\n\n        # Field doesn't exist, try to add the mapping\n        try:\n            mapping = {\n                \"properties\": {\n                    field_name: {\n                        \"type\": \"knn_vector\",\n                        \"dimension\": dim,\n                        \"method\": {\n                            \"name\": \"disk_ann\",\n                            \"space_type\": space_type,\n                            \"engine\": engine,\n                            \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n                        },\n                    },\n                    # Also ensure the embedding_model tracking field exists as keyword\n                    \"embedding_model\": {\"type\": \"keyword\"},\n                    \"embedding_dimensions\": {\"type\": \"integer\"},\n                }\n            }\n            client.indices.put_mapping(index=index_name, body=mapping)\n            logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n        except RequestError as e:\n            error_str = str(e).lower()\n            if \"invalid engine\" in error_str and \"jvector\" in error_str:\n                msg = (\n                    \"The 'jvector' engine is not available in your OpenSearch installation. \"\n                    \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to OpenSearch 2.9+.\"\n                )\n                raise ValueError(msg) from e\n            if \"index.knn\" in error_str:\n                msg = (\n                    \"The index has index.knn: false. Delete the existing index and let the \"\n                    \"component recreate it, or create a new index with a different name.\"\n                )\n                raise ValueError(msg) from e\n            raise\n        except Exception as e:\n            # Check if this is the known OpenSearch k-NN NullPointerException issue\n            error_str = str(e).lower()\n            if \"null\" in error_str or \"nullpointerexception\" in error_str:\n                logger.warning(\n                    f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name} \"\n                    f\"due to OpenSearch k-NN plugin issue: {e}. \"\n                    f\"This is a known issue with some OpenSearch versions. \"\n                    f\"Skipping mapping update. 
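Stripped of the error handling, the lazy mapping update is a single put_mapping call; a hedged sketch using opensearch-py, with index, field, and tuning values as placeholders:

    from opensearchpy import OpenSearch

    def add_embedding_field(client: OpenSearch, index: str,
                            field: str, dim: int) -> None:
        # One knn_vector field per embedding model; the tracking fields are
        # re-declared so pre-existing indices pick them up as well.
        client.indices.put_mapping(
            index=index,
            body={
                "properties": {
                    field: {
                        "type": "knn_vector",
                        "dimension": dim,
                        "method": {
                            "name": "disk_ann",
                            "space_type": "l2",
                            "engine": "jvector",
                            "parameters": {"ef_construction": 512, "m": 16},
                        },
                    },
                    "embedding_model": {"type": "keyword"},
                    "embedding_dimensions": {"type": "integer"},
                }
            },
        )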
\"\n f\"Please ensure the index has the correct mapping for KNN search to work.\"\n )\n # Skip and continue - ingestion will proceed, but KNN search may fail if mapping doesn't exist\n return\n logger.warning(f\"[OpenSearchMultimodel] Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n # Verify the field was added correctly\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n logger.debug(f\"[OpenSearchMultimodel] Bulk ingesting embeddings for {index_name}\")\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n\n # Normalize ACL fields that may arrive as JSON strings from flows\n for key in (\"allowed_users\", \"allowed_groups\"):\n value = metadata.get(key)\n if isinstance(value, str):\n try:\n parsed = json.loads(value)\n if isinstance(parsed, list):\n metadata[key] = parsed\n except (json.JSONDecodeError, TypeError):\n # Leave value as-is if it isn't valid JSON\n pass\n\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- param helpers ----------\n def _parse_int_param(self, attr_name: str, default: int) -> int:\n \"\"\"Parse a string attribute to int, returning *default* on failure.\"\"\"\n raw = getattr(self, attr_name, None)\n if raw is None or str(raw).strip() == \"\":\n return default\n try:\n value = int(str(raw).strip())\n except ValueError:\n logger.warning(f\"Invalid integer value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n if value < 0:\n logger.warning(f\"Negative value '{raw}' for {attr_name}, using default {default}\")\n return default\n\n return value\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return 
{\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel] Building OpenSearch client\")\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n timeout=self._parse_int_param(\"request_timeout\", REQUEST_TIMEOUT),\n max_retries=self._parse_int_param(\"max_retries\", MAX_RETRIES),\n retry_on_timeout=True,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"[OpenSearchMultimodel] Ingestion-only mode activated: search operations will be skipped\")\n logger.debug(\"[OpenSearchMultimodel] Starting ingestion mode...\")\n\n logger.debug(f\"[OpenSearchMultimodel] Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[OpenSearchMultimodel][INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[OpenSearchMultimodel][INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[OpenSearchMultimodel][INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"[OpenSearchMultimodel][INGESTION] Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, 
dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic 
using tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\")\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = 
len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping (index.knn: true is required for vector search)\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error == \"resource_already_exists_exception\":\n pass # Index was created concurrently\n else:\n error_msg = str(creation_error).lower()\n if \"invalid engine\" in error_msg or \"illegal_argument\" in error_msg:\n if \"jvector\" in error_msg:\n msg = (\n \"The 'jvector' engine is not available in your OpenSearch installation. \"\n \"Use 'nmslib' or 'faiss' for standard OpenSearch, or upgrade to 2.9+.\"\n )\n raise ValueError(msg) from creation_error\n if \"index.knn\" in error_msg:\n msg = (\n \"The index has index.knn: false. Delete the existing index and let the \"\n \"component recreate it, or create a new index with a different name.\"\n )\n raise ValueError(msg) from creation_error\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n raise\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n 
{\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. 
\"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n def _get_filename_agg_field(self, index_properties: dict[str, Any] | None) -> str:\n \"\"\"Choose the appropriate field for filename aggregations.\"\"\"\n if not index_properties:\n return \"filename.keyword\"\n\n filename_def = index_properties.get(\"filename\")\n if not isinstance(filename_def, dict):\n return \"filename.keyword\"\n\n field_type = filename_def.get(\"type\")\n fields_def = filename_def.get(\"fields\", {})\n\n # Top-level keyword with no subfields\n if field_type == \"keyword\" and not isinstance(fields_def, dict):\n return \"filename\"\n\n # Text field with keyword subfield\n if isinstance(fields_def, dict) and \"keyword\" in fields_def:\n return \"filename.keyword\"\n\n # Fallback: aggregate on filename directly\n return \"filename\"\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a 
sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Determine the best aggregation field for filename based on index mapping\n filename_agg_field = self._get_filename_agg_field(index_properties)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": filename_agg_field, \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n 
fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -3924,13 +3975,13 @@ "display_name": "Vector Engine", "dynamic": false, "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "info": "Vector search engine for similarity calculations. 'nmslib' works with standard OpenSearch. 'jvector' requires OpenSearch 2.9+. 'lucene' requires index.knn: true. Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", "name": "engine", "options": [ - "jvector", "nmslib", "faiss", - "lucene" + "lucene", + "jvector" ], "options_metadata": [], "override_skip": false, @@ -3962,6 +4013,7 @@ "multiline": true, "name": "filter_expression", "override_skip": false, + "password": false, "placeholder": "", "required": false, "show": true, @@ -4078,6 +4130,27 @@ "type": "int", "value": 16 }, + "max_retries": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "Number of retries for failed connections before raising an error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "3" + }, "num_candidates": { "_input_type": "IntInput", "advanced": true, @@ -4158,6 +4231,27 @@ "type": "str", "value": "" }, + "request_timeout": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Request Timeout (seconds)", + "dynamic": false, + "info": "Time in seconds to wait for a response from OpenSearch. 
Increase for large bulk ingestion or complex hybrid queries.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "60" + }, "search_query": { "_input_type": "QueryInput", "advanced": false, @@ -4325,12 +4419,12 @@ "dragging": false, "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", "measured": { - "height": 904, + "height": 967, "width": 320 }, "position": { - "x": 2779.4314297063547, - "y": 1442.1019431938519 + "x": 2729.318175939116, + "y": 1398.252846147518 }, "selected": false, "type": "genericNode" @@ -4356,9 +4450,9 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-12-12T20:28:32.528Z", + "last_updated": "2026-03-05T16:40:19.995Z", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.8.0", "metadata": {}, "minimized": false, "output_types": [], @@ -4406,7 +4500,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "2bee9dd9-f030-469f-a568-6fcb3a6e7140" + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" }, "_type": "Component", "code": { @@ -4446,6 +4540,7 @@ "multiline": true, "name": "dynamic_connector_type", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -4479,6 +4574,7 @@ "multiline": true, "name": "dynamic_owner", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -4512,6 +4608,7 @@ "multiline": true, "name": "dynamic_owner_email", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -4545,6 +4642,7 @@ "multiline": true, "name": "dynamic_owner_name", "override_skip": false, + "password": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -4665,162 +4763,197 @@ "dragging": false, "id": "AdvancedDynamicFormBuilder-ziCu4", "measured": { - "height": 552, + "height": 553, "width": 320 }, "position": { - "x": 2393.139136901506, - "y": 595.9579209002084 + "x": 2344.490839041331, + "y": 830.665770502212 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "SecretInput-lr9k6", + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-muH88", "node": { "base_classes": [ - "Message" + "Embeddings" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, "field_order": [ - "input_value" + "model", + "api_key", + "api_base", + "base_url_ibm_watsonx", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" ], "frozen": false, - "icon": "type", + "icon": "binary", + "last_updated": "2026-02-27T18:48:44.951Z", "legacy": false, - "lf_version": "1.6.3.dev0", - "metadata": {}, + "lf_version": "1.8.0", + "metadata": { + 
"code_hash": "c5ce0982da48", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_core", + "version": "0.3.83" + } + ], + "total_dependencies": 2 + }, + "module": "custom_components.embedding_model" + }, "minimized": false, "output_types": [], "outputs": [ { "allows_loop": false, "cache": true, - "display_name": "Output Text", + "display_name": "Embedding Model", "group_outputs": false, "hidden": null, - "method": "text_response", - "name": "text", + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", "options": null, "required_inputs": null, - "selected": "Message", + "selected": "Embeddings", "tool_mode": true, "types": [ - "Message" + "Embeddings" ], "value": "__UNDEFINED__" } ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" + }, "_type": "Component", - "code": { + "api_base": { + "_input_type": "MessageTextInput", "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. Leave empty for default.", + "input_types": [ + "Message" + ], "list": false, + "list_add_label": "Add More", "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, + "name": "api_base", + "override_skip": false, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" }, - "input_value": { + "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", + "advanced": true, + "display_name": "API Key", "dynamic": false, - "info": "Secret to be passed as input.", + "info": "Model Provider API key", "input_types": [], - "load_from_db": true, - "name": "input_value", + "load_from_db": false, + "name": "api_key", + "override_skip": false, "password": true, "placeholder": "", + "real_time_refresh": true, "required": false, "show": true, "title_case": false, + "track_in_telemetry": false, "type": "str", - "value": "CONNECTOR_TYPE" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "SecretInput" - }, - "dragging": false, - "id": "SecretInput-lr9k6", - "measured": { - "height": 220, - "width": 320 - }, - "position": { - "x": 1957.8516847253545, - "y": 426.1185670449811 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "SecretInput-KYwsB", - "node": { - "base_classes": [ - 
"Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.6.3.dev0", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" + "value": "" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, "code": { "advanced": true, "dynamic": true, @@ -4837,240 +4970,334 @@ "show": true, "title_case": false, "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom 
lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, + "info": "The number of dimensions the resulting output embeddings should have. 
Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "dimensions", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, - "type": "str", - "value": "OWNER" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "SecretInput" - }, - "dragging": false, - "id": "SecretInput-KYwsB", - "measured": { - "height": 220, - "width": 320 - }, - "position": { - "x": 1956.094514779677, - "y": 676.2918713576739 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "SecretInput-pYHMH", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.6.3.dev0", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", + "display_name": "Include the original text in the output", + "dynamic": false, "info": "", "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, "placeholder": "", - "required": true, - "show": true, + "required": false, + "show": false, "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", + "is_refresh": true, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, + "info": "", + "list": false, + 
"list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, - "type": "str", - "value": "OWNER_EMAIL" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "SecretInput" - }, - "dragging": false, - "id": "SecretInput-pYHMH", - "measured": { - "height": 220, - "width": 320 - }, - "position": { - "x": 1950.891944424919, - "y": 923.3252721744263 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "SecretInput-aoBVB", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Allows the selection of a secret to be generated as output..", - "display_name": "Secret Input", - "documentation": "https://docs.langflow.org/components-io#text-input", - "edited": true, - "field_order": [ - "input_value" - ], - "frozen": false, - "icon": "type", - "legacy": false, - "lf_version": "1.6.3.dev0", - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Text", - "group_outputs": false, - "hidden": null, - "method": "text_response", - "name": "text", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "ModelInput", + "advanced": false, + "display_name": "Embedding Model", + "dynamic": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", + "name": "model", + "options": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + 
"is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } + ], + "override_skip": false, + "placeholder": "Setup Provider", + "real_time_refresh": true, + "refresh_button": true, + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ "Message" ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "request_timeout": { + "_input_type": "FloatInput", "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", + "display_name": "Request Timeout", + "dynamic": false, "info": "", "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, - "type": "code", - "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", 
method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" }, - "input_value": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Secret", + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", "dynamic": false, - "info": "Secret to be passed as input.", - "input_types": [], - "load_from_db": true, - "name": "input_value", - "password": true, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, - "type": "str", - "value": "OWNER_NAME" + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 } }, "tool_mode": false }, "showNode": true, - "type": "SecretInput" + "type": "EmbeddingModel" }, "dragging": false, - "id": "SecretInput-aoBVB", + "id": "EmbeddingModel-muH88", "measured": { - "height": 220, + "height": 207, "width": 320 }, "position": { - "x": 1956.6284934755163, - "y": 1161.8450254281008 + "x": 1699.406784507022, + "y": 2056.210282680243 }, "selected": false, "type": "genericNode" @@ -5079,7 +5306,7 @@ "data": { "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", - "id": "EmbeddingModel-muH88", + "id": "EmbeddingModel-Rp0iI", "node": { "base_classes": [ "Embeddings" @@ -5090,14 +5317,12 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", "model", "api_key", + "api_base", + "base_url_ibm_watsonx", "project_id", "dimensions", "chunk_size", @@ -5106,47 +5331,27 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-12-12T20:28:32.529Z", + "last_updated": "2026-02-27T18:48:40.046Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "c5ce0982da48", "dependencies": { "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" + "version": null }, { - "name": "langchain_ibm", - "version": "0.3.19" + "name": "langchain_core", + "version": "0.3.83" } ], - "total_dependencies": 7 + "total_dependencies": 2 }, "module": "custom_components.embedding_model" }, @@ -5178,7 +5383,7 @@ "value": 
"72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "2bee9dd9-f030-469f-a568-6fcb3a6e7140" + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" }, "_type": "Component", "api_base": { @@ -5197,7 +5402,7 @@ "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -5208,23 +5413,23 @@ }, "api_key": { "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "API Key (Optional)", + "advanced": true, + "display_name": "API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -5294,7 +5499,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.unified_models import (\n get_api_key_for_provider,\n get_embedding_class,\n get_embedding_model_options,\n get_unified_models_detailed,\n update_model_options_in_build_config,\n)\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n ModelInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n # Update model options\n build_config = update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"embedding_model_options\",\n get_options_func=get_embedding_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n # Show/hide provider-specific fields based on selected model\n if field_name == \"model\" and isinstance(field_value, list) and len(field_value) > 0:\n selected_model = field_value[0]\n provider = selected_model.get(\"provider\", \"\")\n\n # Show/hide watsonx fields\n is_watsonx = provider == \"IBM WatsonX\"\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = is_watsonx\n build_config[\"project_id\"][\"show\"] = is_watsonx\n build_config[\"truncate_input_tokens\"][\"show\"] = is_watsonx\n build_config[\"input_text\"][\"show\"] = is_watsonx\n if is_watsonx:\n build_config[\"base_url_ibm_watsonx\"][\"required\"] = True\n build_config[\"project_id\"][\"required\"] = True\n\n return build_config\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Embedding Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n model_type=\"embedding\",\n input_types=[\"Embeddings\"], # Override default to accept Embeddings instead of LanguageModel\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n # Watson-specific inputs\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n advanced=True,\n value=1000,\n ),\n FloatInput(\n name=\"request_timeout\",\n display_name=\"Request Timeout\",\n advanced=True,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n advanced=True,\n value=3,\n ),\n BoolInput(\n name=\"show_progress_bar\",\n display_name=\"Show Progress Bar\",\n advanced=True,\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n \"\"\"Build and return an embeddings instance based on the selected model.\n\n Returns an EmbeddingsWithModels wrapper that contains:\n - The primary embedding instance (for the selected model)\n - available_models dict mapping all available model names to their instances\n \"\"\"\n # If an Embeddings object is directly connected, return it\n try:\n from langchain_core.embeddings import Embeddings as BaseEmbeddings\n\n if isinstance(self.model, BaseEmbeddings):\n return self.model\n except ImportError:\n pass\n\n # Safely extract model configuration\n if not self.model or not isinstance(self.model, list):\n msg = \"Model must be a non-empty list\"\n raise ValueError(msg)\n\n model = self.model[0]\n model_name = model.get(\"name\")\n provider = model.get(\"provider\")\n metadata = model.get(\"metadata\", {})\n\n # Get API key from user input or global variables\n api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)\n\n # Validate required fields (Ollama doesn't require API key)\n if not api_key and provider != \"Ollama\":\n msg = (\n f\"{provider} API key is required. 
\"\n f\"Please provide it in the component or configure it globally as \"\n f\"{provider.upper().replace(' ', '_')}_API_KEY.\"\n )\n raise ValueError(msg)\n\n if not model_name:\n msg = \"Model name is required\"\n raise ValueError(msg)\n\n # Get embedding class\n embedding_class_name = metadata.get(\"embedding_class\")\n if not embedding_class_name:\n msg = f\"No embedding class defined in metadata for {model_name}\"\n raise ValueError(msg)\n\n embedding_class = get_embedding_class(embedding_class_name)\n\n # Build kwargs using parameter mapping for primary instance\n kwargs = self._build_kwargs(model, metadata)\n primary_instance = embedding_class(**kwargs)\n\n # Get all available embedding models for this provider\n available_models_dict = self._build_available_models(\n provider=provider,\n embedding_class=embedding_class,\n metadata=metadata,\n api_key=api_key,\n )\n\n # Wrap with EmbeddingsWithModels to provide available_models metadata\n return EmbeddingsWithModels(\n embeddings=primary_instance,\n available_models=available_models_dict,\n )\n\n def _build_available_models(\n self,\n provider: str,\n embedding_class: type,\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Embeddings]:\n \"\"\"Build a dictionary of all available embedding model instances for the provider.\n\n Args:\n provider: The provider name (e.g., \"OpenAI\", \"Ollama\")\n embedding_class: The embedding class to instantiate\n metadata: Metadata containing param_mapping\n api_key: The API key for the provider\n\n Returns:\n Dict mapping model names to their embedding instances\n \"\"\"\n available_models_dict: dict[str, Embeddings] = {}\n\n # Get all embedding models for this provider from unified models\n all_embedding_models = get_unified_models_detailed(\n providers=[provider],\n model_type=\"embeddings\",\n include_deprecated=False,\n include_unsupported=False,\n )\n\n if not all_embedding_models:\n return available_models_dict\n\n # Extract models from the provider data\n for provider_data in all_embedding_models:\n if provider_data.get(\"provider\") != provider:\n continue\n\n for model_data in provider_data.get(\"models\", []):\n model_name = model_data.get(\"model_name\")\n if not model_name:\n continue\n\n # Create a model dict compatible with _build_kwargs\n model_dict = {\n \"name\": model_name,\n \"provider\": provider,\n \"metadata\": metadata, # Reuse the same metadata/param_mapping\n }\n\n try:\n # Build kwargs for this model\n model_kwargs = self._build_kwargs_for_model(model_dict, metadata, api_key)\n # Create the embedding instance\n available_models_dict[model_name] = embedding_class(**model_kwargs)\n except Exception: # noqa: BLE001\n # Skip models that fail to instantiate\n # This handles cases where specific models have incompatible parameters\n logger.debug(\"Failed to instantiate embedding model %s: skipping\", model_name, exc_info=True)\n continue\n\n return available_models_dict\n\n def _build_kwargs_for_model(\n self,\n model: dict[str, Any],\n metadata: dict[str, Any],\n api_key: str | None,\n ) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary for a specific model using parameter mapping.\n\n This is similar to _build_kwargs but uses the provided api_key directly\n instead of looking it up again.\n\n Args:\n model: Model dict with name and provider\n metadata: Metadata containing param_mapping\n api_key: The API key to use\n\n Returns:\n kwargs dict for embedding class instantiation\n \"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not 
param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n provider = model.get(\"provider\")\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n\n # Add API key if mapped\n if \"api_key\" in param_mapping and api_key:\n kwargs[param_mapping[\"api_key\"]] = api_key\n\n # Optional parameters with their values\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n\n def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Build kwargs dictionary using parameter mapping.\"\"\"\n param_mapping = metadata.get(\"param_mapping\", {})\n if not param_mapping:\n msg = \"Parameter mapping not found in metadata\"\n raise ValueError(msg)\n\n kwargs = {}\n\n # Required parameters - handle both \"model\" and \"model_id\" (for watsonx)\n if \"model\" in param_mapping:\n kwargs[param_mapping[\"model\"]] = model.get(\"name\")\n elif \"model_id\" in param_mapping:\n kwargs[param_mapping[\"model_id\"]] = model.get(\"name\")\n if \"api_key\" in param_mapping:\n kwargs[param_mapping[\"api_key\"]] = get_api_key_for_provider(\n self.user_id,\n model.get(\"provider\"),\n self.api_key,\n )\n\n # Optional parameters with their values\n provider = model.get(\"provider\")\n optional_params = {\n \"api_base\": self.api_base if self.api_base else None,\n \"dimensions\": int(self.dimensions) 
if self.dimensions else None,\n \"chunk_size\": int(self.chunk_size) if self.chunk_size else None,\n \"request_timeout\": float(self.request_timeout) if self.request_timeout else None,\n \"max_retries\": int(self.max_retries) if self.max_retries else None,\n \"show_progress_bar\": self.show_progress_bar if hasattr(self, \"show_progress_bar\") else None,\n \"model_kwargs\": self.model_kwargs if self.model_kwargs else None,\n }\n\n # Watson-specific parameters\n if provider in {\"IBM WatsonX\", \"IBM watsonx.ai\"}:\n # Map base_url_ibm_watsonx to \"url\" parameter for watsonx\n if \"url\" in param_mapping:\n url_value = (\n self.base_url_ibm_watsonx\n if hasattr(self, \"base_url_ibm_watsonx\") and self.base_url_ibm_watsonx\n else \"https://us-south.ml.cloud.ibm.com\"\n )\n kwargs[param_mapping[\"url\"]] = url_value\n # Map project_id for watsonx\n if hasattr(self, \"project_id\") and self.project_id and \"project_id\" in param_mapping:\n kwargs[param_mapping[\"project_id\"]] = self.project_id\n\n # Ollama-specific parameters\n if provider == \"Ollama\" and \"base_url\" in param_mapping:\n # Map api_base to \"base_url\" parameter for Ollama\n base_url_value = self.api_base if hasattr(self, \"api_base\") and self.api_base else \"http://localhost:11434\"\n kwargs[param_mapping[\"base_url\"]] = base_url_value\n\n # Add optional parameters if they have values and are mapped\n for param_name, param_value in optional_params.items():\n if param_value is not None and param_name in param_mapping:\n # Special handling for request_timeout with Google provider\n if param_name == \"request_timeout\":\n if provider == \"Google Generative AI\" and isinstance(param_value, (int, float)):\n kwargs[param_mapping[param_name]] = {\"timeout\": param_value}\n else:\n kwargs[param_mapping[param_name]] = param_value\n else:\n kwargs[param_mapping[param_name]] = param_value\n\n return kwargs\n" }, "dimensions": { "_input_type": "IntInput", @@ -5304,6 +5509,7 @@ "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", "list": false, "list_add_label": "Add More", + "load_from_db": false, "name": "dimensions", "override_skip": false, "placeholder": "", @@ -5316,27 +5522,6 @@ "type": "int", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -5357,7 +5542,7 @@ "type": "bool", "value": true }, - "is_refresh": false, + "is_refresh": true, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -5379,35 +5564,148 @@ "value": 3 }, "model": { - "_input_type": "DropdownInput", + "_input_type": "ModelInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", + "display_name": "Embedding Model", "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, + "external_options": { + "fields": { + "data": { + "node": { + "display_name": "Connect other models", + "icon": "CornerDownLeft", + "name": "connect_other_models" + } + } + } + }, + "info": "Select your model provider", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "model_type": "embedding", "name": "model", "options": [ - "embeddinggemma:latest", - "mxbai-embed-large:latest", - "nomic-embed-text:latest" + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-large", + "provider": "OpenAI" + }, + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-ada-002", + "provider": "OpenAI" + }, + { + "category": "Google Generative AI", + "icon": "GoogleGenerativeAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_Google Generative AI__", + "provider": "Google Generative AI" + }, + { + "category": "IBM WatsonX", + "icon": "WatsonxAI", + "metadata": { + "is_disabled_provider": true, + "variable_name": null + }, + "name": "__enable_provider_IBM WatsonX__", + "provider": "IBM WatsonX" + } ], - "options_metadata": [], "override_skip": false, - "placeholder": "", + "placeholder": "Setup Provider", "real_time_refresh": true, "refresh_button": true, - "required": false, 
+ "required": true, "show": true, "title_case": false, - "toggle": false, "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "embeddinggemma:latest" + "trace_as_input": true, + "track_in_telemetry": false, + "type": "model", + "value": [ + { + "category": "OpenAI", + "icon": "OpenAI", + "metadata": { + "embedding_class": "OpenAIEmbeddings", + "model_type": "embeddings", + "param_mapping": { + "api_base": "base_url", + "api_key": "api_key", + "chunk_size": "chunk_size", + "dimensions": "dimensions", + "max_retries": "max_retries", + "model": "model", + "model_kwargs": "model_kwargs", + "request_timeout": "timeout", + "show_progress_bar": "show_progress_bar" + } + }, + "name": "text-embedding-3-small", + "provider": "OpenAI" + } + ] }, "model_kwargs": { "_input_type": "DictInput", @@ -5429,32 +5727,6 @@ "type": "dict", "value": {} }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "OLLAMA_BASE_URL" - }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -5466,7 +5738,7 @@ ], "list": false, "list_add_label": "Add More", - "load_from_db": false, + "load_from_db": true, "name": "project_id", "override_skip": false, "placeholder": "", @@ -5478,50 +5750,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" - }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "selected_metadata": { - "icon": "Ollama" - }, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "Ollama" + "value": "WATSONX_PROJECT_ID" }, "request_timeout": { "_input_type": "FloatInput", @@ -5590,92 +5819,69 @@ "type": "EmbeddingModel" }, "dragging": false, - "id": "EmbeddingModel-muH88", + "id": "EmbeddingModel-Rp0iI", "measured": { - "height": 369, + "height": 207, "width": 320 }, "position": { - "x": 1699.406784507022, - "y": 2056.210282680243 + "x": 1333.0572399196485, + "y": 2065.5678700260246 }, "selected": false, "type": "genericNode" }, { "data": { - "description": "Generate embeddings using a specified provider.", - "display_name": "Embedding Model", - "id": "EmbeddingModel-Rp0iI", + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", + "id": "DataFrameOperations-NpdW5", "node": { "base_classes": [ - "Embeddings" + "DataFrame" ], "beta": false, 
"conditional_paths": [], "custom_fields": {}, - "description": "Generate embeddings using a specified provider.", - "display_name": "Embedding Model", - "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": true, + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", + "documentation": "https://docs.langflow.org/dataframe-operations", + "edited": false, "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", - "model", - "api_key", - "project_id", - "dimensions", - "chunk_size", - "request_timeout", - "max_retries", - "show_progress_bar", - "model_kwargs", - "truncate_input_tokens", - "input_text", - "fail_safe_mode" + "df", + "operation", + "column_name", + "filter_value", + "filter_operator", + "ascending", + "new_column_name", + "new_column_value", + "columns_to_select", + "num_rows", + "replace_value", + "replacement_value" ], "frozen": false, - "icon": "binary", - "last_updated": "2025-12-12T20:28:32.529Z", + "icon": "table", + "last_updated": "2025-12-12T20:28:32.649Z", "legacy": false, + "lf_version": "1.8.0", "metadata": { - "code_hash": "0e2d6fe67a26", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" + "name": "pandas", + "version": "2.2.3" }, { "name": "lfx", - "version": "0.2.0.dev21" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" - }, - { - "name": "langchain_ibm", - "version": "0.3.19" + "version": null } ], - "total_dependencies": 7 + "total_dependencies": 2 }, - "module": "custom_components.embedding_model" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -5683,18 +5889,17 @@ { "allows_loop": false, "cache": true, - "display_name": "Embedding Model", + "display_name": "DataFrame", "group_outputs": false, - "hidden": null, "loop_types": null, - "method": "build_embeddings", - "name": "embeddings", + "method": "perform_operation", + "name": "output", "options": null, "required_inputs": null, - "selected": "Embeddings", + "selected": "DataFrame", "tool_mode": true, "types": [ - "Embeddings" + "DataFrame" ], "value": "__UNDEFINED__" } @@ -5708,480 +5913,413 @@ "value": "2bee9dd9-f030-469f-a568-6fcb3a6e7140" }, "_type": "Component", - "api_base": { - "_input_type": "MessageTextInput", + "ascending": { + "_input_type": "BoolInput", + "advanced": false, + "display_name": "Sort Ascending", + "dynamic": true, + "info": "Whether to sort in ascending order.", + "list": false, + "list_add_label": "Add More", + "name": "ascending", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { "advanced": true, - "display_name": "API Base URL", - "dynamic": false, - "info": "Base URL for the API. 
Leave empty for default.", - "input_types": [ - "Message" - ], + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n 
MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n 
logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + }, + "column_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Column Name", + "dynamic": true, + "info": "The column name to use for the operation.", "list": false, "list_add_label": "Add More", "load_from_db": false, - "name": "api_base", + "name": "column_name", "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, - "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", "value": "" }, - "api_key": { - "_input_type": "SecretStrInput", + "columns_to_select": { + "_input_type": "StrInput", "advanced": false, - "display_name": "IBM watsonx.ai API Key", - "dynamic": false, - "info": "Model Provider API key", - "input_types": [], - "load_from_db": 
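The `filter_rows_by_value` logic in the hunk above maps each dropdown operator onto a pandas boolean mask and, for the ordering operators, prefers a numeric comparison with a lexicographic fallback. A minimal standalone sketch of that dispatch, assuming plain pandas (`filter_rows` is an illustrative helper, not part of the component, and only a representative subset of operators is shown):

import pandas as pd

def filter_rows(df: pd.DataFrame, column: str, operator: str, value) -> pd.DataFrame:
    # Build a boolean mask for the requested operator, mirroring the
    # component's branch structure.
    col = df[column]
    if operator == "equals":
        mask = col == value
    elif operator == "contains":
        mask = col.astype(str).str.contains(str(value), na=False)
    elif operator in ("greater than", "less than"):
        try:
            target = pd.to_numeric(value)  # prefer numeric comparison
            mask = col > target if operator == "greater than" else col < target
        except (ValueError, TypeError):
            # Fall back to string comparison when the value is not numeric.
            s = col.astype(str)
            mask = s > str(value) if operator == "greater than" else s < str(value)
    else:
        mask = col == value  # fallback to equals, as in the component
    return df[mask]

frame = pd.DataFrame({"title": ["alpha", "beta"], "score": [1, 7]})
print(filter_rows(frame, "score", "greater than", "3"))  # keeps only the score-7 row

The numeric-first ordering matters because the filter value arrives as text from the UI; coercing it with `pd.to_numeric` keeps `"3" < "10"` behaving numerically instead of lexicographically.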
true, - "name": "api_key", + "display_name": "Columns to Select", + "dynamic": true, + "info": "", + "list": true, + "list_add_label": "Add More", + "load_from_db": false, + "name": "columns_to_select", "override_skip": false, - "password": true, "placeholder": "", - "real_time_refresh": true, "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "WATSONX_API_KEY" + "value": [ + "title" + ] }, - "base_url_ibm_watsonx": { - "_input_type": "DropdownInput", + "df": { + "_input_type": "DataFrameInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "watsonx API Endpoint", + "display_name": "DataFrame", "dynamic": false, - "external_options": {}, - "info": "The base URL of the API (IBM watsonx.ai only)", - "name": "base_url_ibm_watsonx", - "options": [ - "https://us-south.ml.cloud.ibm.com", - "https://eu-de.ml.cloud.ibm.com", - "https://eu-gb.ml.cloud.ibm.com", - "https://au-syd.ml.cloud.ibm.com", - "https://jp-tok.ml.cloud.ibm.com", - "https://ca-tor.ml.cloud.ibm.com" + "info": "The input DataFrame to operate on.", + "input_types": [ + "DataFrame" ], - "options_metadata": [], - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "https://us-south.ml.cloud.ibm.com" - }, - "chunk_size": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Chunk Size", - "dynamic": false, - "info": "", "list": false, "list_add_label": "Add More", - "name": "chunk_size", + "name": "df", "override_skip": false, "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 1000 - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", "required": true, "show": true, "title_case": false, - "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = 
\"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
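The Ollama branch above normalizes the base URL before constructing `OllamaEmbeddings`: a trailing `/v1` selects Ollama's OpenAI-compatible API, which this component does not use, so the suffix is stripped and a warning is logged. A standalone sketch of the same normalization (`normalize_ollama_url` is a hypothetical helper name, not part of the component):

def normalize_ollama_url(base_url: str, default: str = "http://localhost:11434") -> str:
    # Drop a trailing /v1 (the OpenAI-compatibility path) so requests go to
    # the native Ollama API; fall back to the default endpoint if empty.
    url = (base_url or "").rstrip("/")
    if url.endswith("/v1"):
        url = url.removesuffix("/v1")
    return url or default

assert normalize_ollama_url("http://localhost:11434/v1/") == "http://localhost:11434"
assert normalize_ollama_url("") == "http://localhost:11434"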
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" - }, - "dimensions": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Dimensions", - "dynamic": false, - "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", - "list": false, - "list_add_label": "Add More", - "name": "dimensions", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", + "track_in_telemetry": false, + "type": "other", "value": "" }, - "fail_safe_mode": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Fail-Safe Mode", - "dynamic": false, - "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", - "list": false, - "list_add_label": "Add More", - "name": "fail_safe_mode", - "override_skip": false, - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "input_text": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Include the original text in the output", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "input_text", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "is_refresh": false, - "max_retries": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Max Retries", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "max_retries", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 3 - }, - "model": { + "filter_operator": { "_input_type": "DropdownInput", "advanced": false, "combobox": false, "dialog_inputs": {}, - "display_name": "Model Name", - "dynamic": false, + "display_name": "Filter Operator", + "dynamic": true, "external_options": {}, - "info": "Select the embedding model to use", - "load_from_db": false, - "name": "model", + "info": "The operator to apply for filtering rows.", + "name": "filter_operator", "options": [ - "ibm/granite-embedding-278m-multilingual", - "ibm/slate-125m-english-rtrvr-v2", - "ibm/slate-30m-english-rtrvr-v2", - 
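The `update_build_config` hook above repeats one pattern for every provider choice: show the fields that provider needs, hide the rest, and relax `required` on the API key whenever fail-safe mode is enabled. Condensed into a hypothetical standalone helper operating on a plain dict (the field names follow the component's inputs; this is a sketch of the pattern, not the component's actual method):

def apply_provider_visibility(build_config: dict, provider: str, fail_safe: bool) -> dict:
    # OpenAI and IBM watsonx.ai need an API key; Ollama does not.
    needs_key = provider in ("OpenAI", "IBM watsonx.ai")
    build_config["api_key"]["show"] = needs_key
    # Fail-safe mode logs errors instead of raising, so the key is never hard-required.
    build_config["api_key"]["required"] = needs_key and not fail_safe
    build_config["ollama_base_url"]["show"] = provider == "Ollama"
    build_config["project_id"]["show"] = provider == "IBM watsonx.ai"
    return build_config

cfg = {name: {"show": False, "required": False}
       for name in ("api_key", "ollama_base_url", "project_id")}
print(apply_provider_visibility(cfg, "IBM watsonx.ai", fail_safe=False)["api_key"])
# -> {'show': True, 'required': True}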
"intfloat/multilingual-e5-large", - "sentence-transformers/all-minilm-l6-v2" + "equals", + "not equals", + "contains", + "not contains", + "starts with", + "ends with", + "greater than", + "less than" ], "options_metadata": [], "override_skip": false, "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, "required": false, - "show": true, + "show": false, "title_case": false, "toggle": false, "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "ibm/granite-embedding-278m-multilingual" + "value": "equals" }, - "model_kwargs": { - "_input_type": "DictInput", - "advanced": true, - "display_name": "Model Kwargs", - "dynamic": false, - "info": "Additional keyword arguments to pass to the model.", + "filter_value": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Filter Value", + "dynamic": true, + "info": "The value to filter rows by.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", - "name": "model_kwargs", + "load_from_db": false, + "name": "filter_value", "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_input": true, + "trace_as_metadata": true, "track_in_telemetry": false, - "type": "dict", - "value": {} + "type": "str", + "value": "" }, - "ollama_base_url": { - "_input_type": "MessageTextInput", + "is_refresh": false, + "new_column_name": { + "_input_type": "StrInput", "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], + "display_name": "New Column Name", + "dynamic": true, + "info": "The new column name when renaming or adding a column.", "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "ollama_base_url", + "load_from_db": false, + "name": "new_column_name", "override_skip": false, "placeholder": "", - "real_time_refresh": true, "required": false, "show": false, "title_case": false, "tool_mode": false, - "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "OLLAMA_BASE_URL" + "value": "filename" }, - "project_id": { + "new_column_value": { "_input_type": "MessageTextInput", "advanced": false, - "display_name": "Project ID", - "dynamic": false, - "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "display_name": "New Column Value", + "dynamic": true, + "info": "The value to populate the new column with.", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", - "load_from_db": true, - "name": "project_id", + "load_from_db": false, + "name": "new_column_value", "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "WATSONX_PROJECT_ID" + "value": "" }, - "provider": { - "_input_type": "DropdownInput", + "num_rows": { + "_input_type": "IntInput", "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "load_from_db": false, - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": 
"Ollama" - }, - { - "icon": "WatsonxAI" - } - ], + "display_name": "Number of Rows", + "dynamic": true, + "info": "Number of rows to return (for head/tail).", + "list": false, + "list_add_label": "Add More", + "name": "num_rows", "override_skip": false, "placeholder": "", - "real_time_refresh": true, "required": false, - "selected_metadata": { - "icon": "WatsonxAI" - }, - "show": true, + "show": false, "title_case": false, - "toggle": false, "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "str", - "value": "IBM watsonx.ai" + "type": "int", + "value": 5 }, - "request_timeout": { - "_input_type": "FloatInput", - "advanced": true, - "display_name": "Request Timeout", + "operation": { + "_input_type": "SortableListInput", + "advanced": false, + "display_name": "Operation", "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "request_timeout", + "info": "Select the DataFrame operation to perform.", + "limit": 1, + "load_from_db": false, + "name": "operation", + "options": [ + { + "icon": "plus", + "name": "Add Column" + }, + { + "icon": "minus", + "name": "Drop Column" + }, + { + "icon": "filter", + "name": "Filter" + }, + { + "icon": "arrow-up", + "name": "Head" + }, + { + "icon": "pencil", + "name": "Rename Column" + }, + { + "icon": "replace", + "name": "Replace Value" + }, + { + "icon": "columns", + "name": "Select Columns" + }, + { + "icon": "arrow-up-down", + "name": "Sort" + }, + { + "icon": "arrow-down", + "name": "Tail" + }, + { + "icon": "copy-x", + "name": "Drop Duplicates" + } + ], "override_skip": false, - "placeholder": "", + "placeholder": "Select Operation", + "real_time_refresh": true, "required": false, + "search_category": [], "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "float", - "value": "" + "track_in_telemetry": false, + "type": "sortableList", + "value": [ + { + "chosen": false, + "icon": "columns", + "name": "Select Columns", + "selected": false + } + ] }, - "show_progress_bar": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Show Progress Bar", - "dynamic": false, - "info": "", + "replace_value": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Value to Replace", + "dynamic": true, + "info": "The value to replace in the column.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", - "name": "show_progress_bar", + "load_from_db": false, + "name": "replace_value", "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": false + "track_in_telemetry": false, + "type": "str", + "value": "" }, - "truncate_input_tokens": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Truncate Input Tokens", - "dynamic": false, - "info": "", + "replacement_value": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Replacement Value", + "dynamic": true, + "info": "The value to replace with.", + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", - "name": "truncate_input_tokens", + "load_from_db": false, + "name": "replacement_value", "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, + "trace_as_input": true, 
"trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 200 + "track_in_telemetry": false, + "type": "str", + "value": "" } }, "tool_mode": false }, "showNode": true, - "type": "EmbeddingModel" + "type": "DataFrameOperations" }, "dragging": false, - "id": "EmbeddingModel-Rp0iI", + "id": "DataFrameOperations-NpdW5", "measured": { - "height": 534, + "height": 319, "width": 320 }, "position": { - "x": 1333.0572399196485, - "y": 2065.5678700260246 + "x": 822.8496956638812, + "y": 1640.105931333355 }, "selected": false, "type": "genericNode" }, { "data": { - "description": "Perform various operations on a DataFrame.", - "display_name": "DataFrame Operations", - "id": "DataFrameOperations-NpdW5", + "id": "ParserComponent-1eim1", "node": { "base_classes": [ - "DataFrame" + "Message" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Perform various operations on a DataFrame.", - "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/dataframe-operations", + "description": "Extracts text using a template.", + "display_name": "Parser", + "documentation": "https://docs.langflow.org/parser", "edited": false, "field_order": [ - "df", - "operation", - "column_name", - "filter_value", - "filter_operator", - "ascending", - "new_column_name", - "new_column_value", - "columns_to_select", - "num_rows", - "replace_value", - "replacement_value" + "input_data", + "mode", + "pattern", + "sep" ], "frozen": false, - "icon": "table", - "last_updated": "2025-12-12T20:28:32.649Z", + "icon": "braces", "legacy": false, - "lf_version": "1.7.0.dev21", + "lf_version": "1.8.0", "metadata": { - "code_hash": "904f4eaebccd", + "code_hash": "3cda25c3f7b5", "dependencies": { "dependencies": [ - { - "name": "pandas", - "version": "2.2.3" - }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" } ], - "total_dependencies": 2 + "total_dependencies": 1 }, - "module": "custom_components.dataframe_operations" + "module": "lfx.components.processing.parser.ParserComponent" }, "minimized": false, "output_types": [], @@ -6189,50 +6327,21 @@ { "allows_loop": false, "cache": true, - "display_name": "DataFrame", + "display_name": "Parsed Text", "group_outputs": false, - "loop_types": null, - "method": "perform_operation", - "name": "output", - "options": null, - "required_inputs": null, - "selected": "DataFrame", + "method": "parse_combined_text", + "name": "parsed_text", + "selected": "Message", "tool_mode": true, "types": [ - "DataFrame" + "Message" ], "value": "__UNDEFINED__" } ], "pinned": false, "template": { - "_frontend_node_flow_id": { - "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" - }, - "_frontend_node_folder_id": { - "value": "2bee9dd9-f030-469f-a568-6fcb3a6e7140" - }, "_type": "Component", - "ascending": { - "_input_type": "BoolInput", - "advanced": false, - "display_name": "Sort Ascending", - "dynamic": true, - "info": "Whether to sort in ascending order.", - "list": false, - "list_add_label": "Add More", - "name": "ascending", - "override_skip": false, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, "code": { "advanced": true, "dynamic": true, @@ -6249,64 +6358,70 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import 
BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n 
def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n 
mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n 
dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" }, - "column_name": { - "_input_type": "StrInput", + "input_data": { + "_input_type": "HandleInput", "advanced": false, - "display_name": "Column Name", - "dynamic": true, - "info": "The column name to use for the operation.", + "display_name": "Data or DataFrame", + "dynamic": false, + "info": "Accepts either a DataFrame or a Data object.", + "input_types": [ + "DataFrame", + "Data" + ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "column_name", + "name": "input_data", "override_skip": false, "placeholder": "", - "required": false, - "show": false, + "required": true, + "show": true, "title_case": false, - "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": false, - "type": "str", + "type": "other", "value": "" }, - "columns_to_select": { - "_input_type": "StrInput", + "mode": { + "_input_type": "TabInput", "advanced": false, - "display_name": "Columns to Select", - "dynamic": true, - "info": "", - "list": true, - "list_add_label": "Add More", - "load_from_db": false, - "name": "columns_to_select", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, + "display_name": "Mode", + "dynamic": false, + "info": "Convert into raw string instead of using a template.", + "name": "mode", + "options": [ + "Parser", + "Stringify" + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": [ - "title" - ] + "track_in_telemetry": true, + "type": "tab", + "value": "Parser" }, - "df": { - "_input_type": "DataFrameInput", + "pattern": { + "_input_type": "MultilineInput", "advanced": false, - "display_name": "DataFrame", - "dynamic": false, - "info": "The input DataFrame to operate on.", + "ai_enabled": false, + "copy_field": false, + "display_name": "Template", + "dynamic": true, + "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", "input_types": [ - "DataFrame" + 
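`parse_combined_text` above fills the template once per DataFrame row with `str.format`, and for a single Data object uses `format_map` with a `dict` subclass whose `__missing__` supplies a default instead of raising `KeyError` on absent keys. The same idea in isolation (the explicit `default` argument is an addition for the example; the component derives it from the Data object's `default_value`):

class DefaultDict(dict):
    # Return a fixed fallback for any missing template key instead of
    # raising KeyError, so partial records still render.
    def __init__(self, data, default=""):
        super().__init__(data)
        self._default = default

    def __missing__(self, key):
        return self._default

pattern = "Name: {Name}, Country: {Country}"
print(pattern.format_map(DefaultDict({"Name": "Ada"}, default="n/a")))
# -> Name: Ada, Country: n/a

`format_map` is used rather than `format(**data)` because it passes the mapping through unchanged, which is what lets `__missing__` take effect.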
"Message" ], "list": false, "list_add_label": "Add More", - "name": "df", + "load_from_db": false, + "multiline": true, + "name": "pattern", "override_skip": false, "placeholder": "", "required": true, @@ -6316,277 +6431,362 @@ "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, - "type": "other", - "value": "" - }, - "filter_operator": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Filter Operator", - "dynamic": true, - "external_options": {}, - "info": "The operator to apply for filtering rows.", - "name": "filter_operator", - "options": [ - "equals", - "not equals", - "contains", - "not contains", - "starts with", - "ends with", - "greater than", - "less than" - ], - "options_metadata": [], - "override_skip": false, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, "type": "str", - "value": "equals" + "value": "{title}" }, - "filter_value": { + "sep": { "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Filter Value", - "dynamic": true, - "info": "The value to filter rows by.", + "advanced": true, + "display_name": "Separator", + "dynamic": false, + "info": "String used to separate rows/items.", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", "load_from_db": false, - "name": "filter_value", + "name": "sep", "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "\n" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "ParserComponent" + }, + "dragging": false, + "id": "ParserComponent-1eim1", + "measured": { + "height": 331, + "width": 320 + }, + "position": { + "x": 1205.2726296612543, + "y": 1642.3744133698385 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TextInput-OPyEf", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-03-05T16:40:36.785Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 }, - "is_refresh": false, - "new_column_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "New Column Name", + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" + }, + 
"_type": "Component", + "code": { + "advanced": true, "dynamic": true, - "info": "The new column name when renaming or adding a column.", + "fileTypes": [], + "file_path": "", + "info": "", "list": false, - "list_add_label": "Add More", "load_from_db": false, - "name": "new_column_name", - "override_skip": false, + "multiline": true, + "name": "code", + "password": false, "placeholder": "", - "required": false, - "show": false, + "required": true, + "show": true, "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "filename" + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, - "new_column_value": { - "_input_type": "MessageTextInput", + "input_value": { + "_input_type": "MultilineInput", "advanced": false, - "display_name": "New Column Value", - "dynamic": true, - "info": "The value to populate the new column with.", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "new_column_value", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "CONNECTOR_TYPE" }, - "num_rows": { - "_input_type": "IntInput", - "advanced": false, - "display_name": "Number of Rows", - "dynamic": true, - "info": "Number of rows to return (for head/tail).", + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", "list": false, "list_add_label": "Add More", - "name": "num_rows", + "name": "use_global_variable", "override_skip": false, "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 5 - }, - "operation": { - "_input_type": "SortableListInput", - "advanced": false, - "display_name": "Operation", - "dynamic": false, - "info": "Select the DataFrame operation to perform.", - "limit": 1, - "load_from_db": false, - "name": "operation", - "options": [ - { - "icon": "plus", - "name": "Add Column" - }, - { - "icon": "minus", - "name": "Drop Column" - }, - { - "icon": "filter", - "name": "Filter" - }, - { - "icon": "arrow-up", - "name": "Head" - }, - { - "icon": "pencil", - "name": "Rename Column" - }, - { - "icon": "replace", - "name": "Replace Value" - }, - { - "icon": "columns", - "name": "Select Columns" - }, - { - "icon": "arrow-up-down", - "name": "Sort" - }, - { - "icon": "arrow-down", - "name": "Tail" - }, - { - "icon": "copy-x", - "name": "Drop Duplicates" - } - ], - "override_skip": false, - "placeholder": "Select Operation", "real_time_refresh": true, "required": false, - "search_category": [], "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "sortableList", - "value": [ + "track_in_telemetry": true, + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "TextInput" + }, + "dragging": false, + "id": "TextInput-OPyEf", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 2046.1686011821303, + "y": 1025.3287669019733 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TextInput-G5CiZ", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-03-05T16:40:36.785Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ { - "chosen": false, - "icon": "columns", - "name": "Select Columns", - "selected": false + "name": "lfx", + "version": null } - ] + ], + "total_dependencies": 1 + }, + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from 
typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, - "replace_value": { - "_input_type": "MessageTextInput", + "input_value": { + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Value to Replace", - "dynamic": true, - "info": "The value to replace in the column.", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "replace_value", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "OWNER" }, - "replacement_value": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Replacement Value", - "dynamic": true, - "info": "The value to replace with.", - "input_types": [ - "Message" - ], + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", + "dynamic": false, + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "replacement_value", + "name": "use_global_variable", "override_skip": false, "placeholder": "", + "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, - "trace_as_input": true, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" + "track_in_telemetry": true, + "type": "bool", + "value": true } }, "tool_mode": false }, - "showNode": true, - "type": "DataFrameOperations" + "showNode": false, + "type": "TextInput" }, "dragging": false, - "id": "DataFrameOperations-NpdW5", + "id": "TextInput-G5CiZ", "measured": { - "height": 317, - "width": 320 + "height": 52, + "width": 192 }, "position": { - "x": 856.1961817994918, - "y": 1687.1833235248055 + "x": 2047.7721855431944, + "y": 1107.9760140854928 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "ParserComponent-1eim1", + "id": "TextInput-Rm9YX", "node": { "base_classes": [ "Message" @@ -6594,32 +6794,30 @@ "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Extracts text using a template.", - "display_name": "Parser", - "documentation": "https://docs.langflow.org/parser", + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", "edited": false, "field_order": [ - "input_data", - "mode", - "pattern", - "sep" + "input_value", + "use_global_variable" ], "frozen": false, - "icon": "braces", + "icon": "type", + "last_updated": "2026-03-05T16:40:36.785Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "3cda25c3f7b5", + "code_hash": "518f16485886", "dependencies": { "dependencies": [ { "name": "lfx", - "version": "0.2.0.dev21" + "version": null } ], "total_dependencies": 1 }, - "module": "lfx.components.processing.parser.ParserComponent" + "module": "lfx.components.input_output.text.TextInputComponent" }, "minimized": false, "output_types": [], @@ -6627,10 +6825,13 @@ { "allows_loop": false, "cache": true, - "display_name": "Parsed Text", + "display_name": "Output Text", "group_outputs": false, - "method": "parse_combined_text", - "name": "parsed_text", + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -6641,6 +6842,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" + }, "_type": "Component", "code": { "advanced": true, @@ -6658,42 +6865,47 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a 
DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). 
Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" }, - "input_data": { - "_input_type": "HandleInput", + "input_value": { + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Data or DataFrame", + "ai_enabled": false, + "copy_field": false, + "display_name": "Text", "dynamic": false, - "info": "Accepts either a DataFrame or a Data object.", + "info": "Text to be passed as input.", "input_types": [ - "DataFrame", - "Data" + "Message" ], "list": false, "list_add_label": "Add More", - "name": "input_data", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, + "tool_mode": false, + "trace_as_input": true, "trace_as_metadata": true, "track_in_telemetry": false, - "type": "other", - "value": "" + "type": "str", + "value": "OWNER_EMAIL" }, - "mode": { - "_input_type": "TabInput", - "advanced": false, - "display_name": "Mode", + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use Global Variable", "dynamic": false, - "info": "Convert into raw string instead of using a template.", - "name": "mode", - "options": [ - "Parser", - "Stringify" - ], + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", + "list": false, + "list_add_label": "Add More", + "name": "use_global_variable", "override_skip": false, "placeholder": "", "real_time_refresh": true, @@ -6703,28 +6915,131 @@ "tool_mode": false, "trace_as_metadata": true, "track_in_telemetry": true, - "type": "tab", - "value": "Parser" + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": false, + "type": "TextInput" + }, + "dragging": false, + "id": "TextInput-Rm9YX", + "measured": { + "height": 52, + "width": 192 + }, + "position": { + "x": 2046.9016349798983, + "y": 1190.6783175986243 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "TextInput-tahMb", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Get user text inputs.", + "display_name": "Text Input", + "documentation": "https://docs.langflow.org/text-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "use_global_variable" + ], + "frozen": false, + "icon": "type", + "last_updated": "2026-03-05T16:40:36.785Z", + "legacy": false, + "metadata": { + "code_hash": "518f16485886", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 1 }, - "pattern": { + "module": "lfx.components.input_output.text.TextInputComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "loop_types": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "7a54b33c-ad03-4c78-b61b-f95adbb74e17" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nfrom lfx.base.io.text import TextComponent\nfrom lfx.io import BoolInput, MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/text-input-and-output\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n BoolInput(\n name=\"use_global_variable\",\n display_name=\"Use Global Variable\",\n info=\"Enable to select from global variables (shows globe icon). 
Disables multiline editing.\",\n value=False,\n advanced=True,\n real_time_refresh=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"use_global_variable\":\n if field_value:\n # Enable global variable mode: single-line with password masking and globe dropdown\n build_config[\"input_value\"][\"multiline\"] = False\n build_config[\"input_value\"][\"password\"] = True\n else:\n # Default mode: multiline text editing\n build_config[\"input_value\"][\"multiline\"] = True\n build_config[\"input_value\"][\"password\"] = False\n return build_config\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n" + }, + "input_value": { "_input_type": "MultilineInput", "advanced": false, "ai_enabled": false, "copy_field": false, - "display_name": "Template", - "dynamic": true, - "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", + "display_name": "Text", + "dynamic": false, + "info": "Text to be passed as input.", "input_types": [ "Message" ], "list": false, "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "pattern", + "load_from_db": true, + "multiline": false, + "name": "input_value", "override_skip": false, + "password": true, "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, "tool_mode": false, @@ -6732,65 +7047,62 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "{title}" + "value": "OWNER_NAME" }, - "sep": { - "_input_type": "MessageTextInput", + "is_refresh": false, + "use_global_variable": { + "_input_type": "BoolInput", "advanced": true, - "display_name": "Separator", + "display_name": "Use Global Variable", "dynamic": false, - "info": "String used to separate rows/items.", - "input_types": [ - "Message" - ], + "info": "Enable to select from global variables (shows globe icon). 
Disables multiline editing.", "list": false, "list_add_label": "Add More", - "load_from_db": false, - "name": "sep", + "name": "use_global_variable", "override_skip": false, "placeholder": "", + "real_time_refresh": true, "required": false, "show": true, "title_case": false, "tool_mode": false, - "trace_as_input": true, "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "\n" + "track_in_telemetry": true, + "type": "bool", + "value": true } }, "tool_mode": false }, - "showNode": true, - "type": "ParserComponent" + "showNode": false, + "type": "TextInput" }, "dragging": false, - "id": "ParserComponent-1eim1", + "id": "TextInput-tahMb", "measured": { - "height": 329, - "width": 320 + "height": 52, + "width": 192 }, "position": { - "x": 1205.2726296612543, - "y": 1642.3744133698385 + "x": 2046.9016349798978, + "y": 1271.6395199851643 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 294.64772127588185, - "y": -396.0401212124092, - "zoom": 0.5090092700849728 + "x": 98.81388650392034, + "y": 36.94035557328948, + "zoom": 0.2750946523415342 } }, "description": "This flow is to ingest the URL to open search.", "endpoint_name": null, "id": "72c3d17c-2dac-4a73-b48a-6518473d7830", "is_component": false, - "last_tested_version": "1.7.0.dev21", "mcp_enabled": true, + "last_tested_version": "1.8.0", "locked": true, "name": "OpenSearch URL Ingestion Flow", "tags": [ @@ -6799,4 +7111,4 @@ "rag", "q-a" ] -} +} \ No newline at end of file diff --git a/frontend/app/api/mutations/useOnboardingMutation.ts b/frontend/app/api/mutations/useOnboardingMutation.ts index fb8272a8d..9f3ebc06b 100644 --- a/frontend/app/api/mutations/useOnboardingMutation.ts +++ b/frontend/app/api/mutations/useOnboardingMutation.ts @@ -27,6 +27,7 @@ interface OnboardingResponse { message: string; edited: boolean; openrag_docs_filter_id?: string; + task_id?: string; } export const useOnboardingMutation = ( diff --git a/frontend/app/onboarding/_components/onboarding-card.tsx b/frontend/app/onboarding/_components/onboarding-card.tsx index 964795ee4..182f318f7 100644 --- a/frontend/app/onboarding/_components/onboarding-card.tsx +++ b/frontend/app/onboarding/_components/onboarding-card.tsx @@ -171,6 +171,8 @@ const OnboardingCard = ({ const [error, setError] = useState(null); + const [onboardingTaskId, setOnboardingTaskId] = useState(null); + // Track which tasks we've already handled to prevent infinite loops const handledFailedTasksRef = useRef>(new Set()); @@ -199,14 +201,58 @@ const OnboardingCard = ({ }, }); + // Mutations + const onboardingMutation = useOnboardingMutation({ + onSuccess: (data) => { + console.log("Onboarding completed successfully", data); + + if (data.task_id) { + setOnboardingTaskId(data.task_id); + } + + // Update provider health cache to healthy since backend just validated + const provider = + (isEmbedding ? 
settings.embedding_provider : settings.llm_provider) || + modelProvider; + const healthData: ProviderHealthResponse = { + status: "healthy", + message: "Provider is configured and working correctly", + provider: provider, + }; + queryClient.setQueryData(["provider", "health"], healthData); + setError(null); + if (!isEmbedding) { + setCurrentStep(totalSteps); + setTimeout(() => { + onComplete(); + }, 1000); + } else { + setCurrentStep(0); + } + }, + onError: (error) => { + setError(error.message); + setCurrentStep(totalSteps); + rollbackMutation.mutate(); + }, + }); + // Monitor tasks and call onComplete when all tasks are done useEffect(() => { if (currentStep === null || !tasks || !isEmbedding) { return; } + if (!onboardingMutation.isSuccess) { + return; + } + + const relevantTasks = onboardingTaskId + ? tasks.filter((task) => task.task_id === onboardingTaskId) + : []; + // Check if there are any active tasks (pending, running, or processing) - const activeTasks = tasks.find( + const activeTasks = relevantTasks.find( (task) => task.status === "pending" || task.status === "running" || @@ -214,12 +260,12 @@ const OnboardingCard = ({ ); // Check if any task failed at the top level - const failedTask = tasks.find( + const failedTask = relevantTasks.find( (task) => task.status === "failed" || task.status === "error", ); // Check if any completed task has at least one failed file - const completedTaskWithFailedFile = tasks.find((task) => { + const completedTaskWithFailedFile = relevantTasks.find((task) => { // Must have files object if (!task.files || typeof task.files !== "object") { return false; @@ -286,13 +332,17 @@ const OnboardingCard = ({ return; } + const hasSuccessfulTasks = relevantTasks.length > 0 && + (!activeTasks || (activeTasks.successful_files ?? 0) > 0); + + const hasIngestionDisabledOrDone = !onboardingTaskId && currentStep === totalSteps - 1; + // If at least one processed file, no failures, and we've started onboarding, complete it if ( - (((!activeTasks || (activeTasks.successful_files ?? 0) > 0) && - tasks.length > 0) || - (tasks.length === 0 && currentStep === totalSteps - 1)) && // Complete because no files were ingested + (hasSuccessfulTasks || hasIngestionDisabledOrDone) && !isCompleted && - !taskWithFailure + !taskWithFailure && + currentStep === totalSteps - 1 ) { // Set to final step to show "Done" setCurrentStep(totalSteps); @@ -309,40 +359,10 @@ const OnboardingCard = ({ isEmbedding, totalSteps, rollbackMutation, + onboardingMutation.isSuccess, + onboardingTaskId, ]); - // Mutations - const onboardingMutation = useOnboardingMutation({ - onSuccess: (data) => { - console.log("Onboarding completed successfully", data); - - // Update provider health cache to healthy since backend just validated - const provider = - (isEmbedding ? settings.embedding_provider : settings.llm_provider) || - modelProvider; - const healthData: ProviderHealthResponse = { - status: "healthy", - message: "Provider is configured and working correctly", - provider: provider, - }; - queryClient.setQueryData(["provider", "health"], healthData); - setError(null); - if (!isEmbedding) { - setCurrentStep(totalSteps); - setTimeout(() => { - onComplete(); - }, 1000); - } else { - setCurrentStep(0); - } - }, - onError: (error) => { - setError(error.message); - setCurrentStep(totalSteps); - rollbackMutation.mutate(); - }, - }); - const handleComplete = () => { const currentProvider = isEmbedding ? 
settings.embedding_provider diff --git a/kubernetes/helm/openrag/templates/langflow/deployment.yaml b/kubernetes/helm/openrag/templates/langflow/deployment.yaml index 09d401e17..058fcc815 100644 --- a/kubernetes/helm/openrag/templates/langflow/deployment.yaml +++ b/kubernetes/helm/openrag/templates/langflow/deployment.yaml @@ -256,7 +256,7 @@ spec: secretKeyRef: name: {{ include "openrag.fullname" . }}-llm-providers key: watsonx-api-key - - name: WATSONX_ENDPOINT + - name: WATSONX_URL valueFrom: secretKeyRef: name: {{ include "openrag.fullname" . }}-llm-providers @@ -269,7 +269,7 @@ spec: {{- else }} - name: WATSONX_API_KEY value: "None" - - name: WATSONX_ENDPOINT + - name: WATSONX_URL value: "None" - name: WATSONX_PROJECT_ID value: "None" diff --git a/kubernetes/helm/openrag/values.yaml b/kubernetes/helm/openrag/values.yaml index 176e58efc..97d42e781 100644 --- a/kubernetes/helm/openrag/values.yaml +++ b/kubernetes/helm/openrag/values.yaml @@ -123,7 +123,7 @@ langflow: host: "https://cloud.langfuse.com" # Variables to expose to flows - variablesToGetFromEnvironment: "JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,OPENSEARCH_INDEX_NAME,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,DOCUMENT_ID,SOURCE_URL,ALLOWED_USERS,ALLOWED_GROUPS,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL" + variablesToGetFromEnvironment: "JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OPENSEARCH_URL,OPENSEARCH_INDEX_NAME,DOCLING_SERVE_URL,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,DOCUMENT_ID,SOURCE_URL,ALLOWED_USERS,ALLOWED_GROUPS,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_URL,WATSONX_PROJECT_ID,OLLAMA_BASE_URL" # Probes livenessProbe: diff --git a/src/api/settings.py b/src/api/settings.py index 562dac8e8..6b6c31607 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -168,6 +168,7 @@ class OnboardingResponse(BaseModel): edited: bool sample_data_ingested: bool openrag_docs_filter_id: Optional[str] = None + task_id: Optional[str] = None class DoclingConfig(BaseModel): do_ocr: bool @@ -975,6 +976,8 @@ async def onboarding( ) raise + task_id = None + # Initialize the OpenSearch index if embedding model is configured if body.embedding_model or body.embedding_provider: try: @@ -1004,7 +1007,7 @@ async def onboarding( # Import the function here to avoid circular imports from main import ingest_default_documents_when_ready - await ingest_default_documents_when_ready( + task_id = await ingest_default_documents_when_ready( document_service, task_service, langflow_file_service, @@ -1094,6 +1097,7 @@ async def onboarding( edited=True, # Confirm that config is now marked as edited sample_data_ingested=should_ingest_sample_data, openrag_docs_filter_id=openrag_docs_filter_id, + task_id=task_id, ) except Exception as e: @@ -1121,7 +1125,7 @@ async def _create_openrag_docs_filter( return None # Get JWT token - jwt_token = user.jwt_token + jwt_token = user.jwt_token # In no-auth mode, set owner to None so filter is visible to all users # In auth mode, use the actual user as owner @@ -1192,6 +1196,12 @@ async def _update_langflow_global_variables(config): ) logger.info("Set WATSONX_PROJECT_ID global variable in Langflow") + if config.providers.watsonx.endpoint: + await clients._create_langflow_global_variable( + "WATSONX_URL", config.providers.watsonx.endpoint, modify=True + ) + logger.info("Set WATSONX_URL global variable in 
Langflow") + # OpenAI global variables if config.providers.openai.api_key: await clients._create_langflow_global_variable( @@ -1208,11 +1218,19 @@ async def _update_langflow_global_variables(config): # Ollama global variables if config.providers.ollama.endpoint: - endpoint = transform_localhost_url(config.providers.ollama.endpoint) - await clients._create_langflow_global_variable( - "OLLAMA_BASE_URL", endpoint, modify=True - ) - logger.info("Set OLLAMA_BASE_URL global variable in Langflow") + + try: + endpoint = transform_localhost_url(config.providers.ollama.endpoint, is_langflow=True, is_podman=True) + await clients._create_langflow_global_variable( + "OLLAMA_BASE_URL", endpoint, modify=True + ) + logger.info("Set OLLAMA_BASE_URL global variable in Langflow (Podman)") + except Exception: + endpoint = transform_localhost_url(config.providers.ollama.endpoint, is_langflow=True, is_podman=False) + await clients._create_langflow_global_variable( + "OLLAMA_BASE_URL", endpoint, modify=True + ) + logger.info("Set OLLAMA_BASE_URL global variable in Langflow (Docker)") if config.knowledge.embedding_model: await clients._create_langflow_global_variable( @@ -1269,13 +1287,10 @@ async def _update_langflow_model_values(config, flows_service): try: # Update LLM model values llm_provider = config.agent.llm_provider.lower() - llm_provider_config = config.get_llm_provider_config() - llm_endpoint = getattr(llm_provider_config, "endpoint", None) await flows_service.change_langflow_model_value( llm_provider, llm_model=config.agent.llm_model, - endpoint=llm_endpoint, ) logger.info( f"Successfully updated Langflow flows for LLM provider {llm_provider}" @@ -1283,13 +1298,10 @@ async def _update_langflow_model_values(config, flows_service): # Update embedding model values embedding_provider = config.knowledge.embedding_provider.lower() - embedding_provider_config = config.get_embedding_provider_config() - embedding_endpoint = getattr(embedding_provider_config, "endpoint", None) await flows_service.change_langflow_model_value( embedding_provider, embedding_model=config.knowledge.embedding_model, - endpoint=embedding_endpoint, ) logger.info( f"Successfully updated Langflow flows for embedding provider {embedding_provider}" @@ -1517,6 +1529,7 @@ async def rollback_onboarding( # Mark config as not edited so user can go through onboarding again current_config.edited = False + current_config.onboarding.current_step = 0 # Save the rolled back configuration manually to avoid save_config_file setting edited=True try: diff --git a/src/config/settings.py b/src/config/settings.py index b4d1e9e28..1c7348d4c 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -697,6 +697,7 @@ async def _create_langflow_global_variable( variable_name=name, error=str(e), ) + raise e async def _update_langflow_global_variable(self, name: str, value: str): """Update an existing global variable in Langflow via API""" diff --git a/src/main.py b/src/main.py index 598263f78..76fbf0cf8 100644 --- a/src/main.py +++ b/src/main.py @@ -346,16 +346,18 @@ async def ingest_default_documents_when_ready( raise FileNotFoundError(f"No default documents found in {base_dir}") if DISABLE_INGEST_WITH_LANGFLOW: - await _ingest_default_documents_openrag( + task_id = await _ingest_default_documents_openrag( document_service, task_service, file_paths ) else: - await _ingest_default_documents_langflow( + task_id = await _ingest_default_documents_langflow( langflow_file_service, session_manager, task_service, file_paths ) await 
TelemetryClient.send_event(Category.DOCUMENT_INGESTION, MessageId.ORB_DOC_DEFAULT_COMPLETE) + return task_id + except Exception as e: logger.error("Default documents ingestion failed", error=str(e)) await TelemetryClient.send_event(Category.DOCUMENT_INGESTION, MessageId.ORB_DOC_DEFAULT_FAILED) @@ -391,19 +393,6 @@ async def _ingest_default_documents_langflow( if hasattr(session_manager, "_anonymous_jwt"): effective_jwt = session_manager._anonymous_jwt - # Prepare tweaks for default documents with anonymous user metadata - default_tweaks = { - "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4": { - "docs_metadata": [ - {"key": "owner", "value": None}, - {"key": "owner_name", "value": anonymous_user.name}, - {"key": "owner_email", "value": anonymous_user.email}, - {"key": "connector_type", "value": "system_default"}, - {"key": "is_sample_data", "value": "true"}, - ] - } - } - # Create a langflow upload task for trackable progress task_id = await task_service.create_langflow_upload_task( user_id=None, # Anonymous user @@ -414,7 +403,7 @@ async def _ingest_default_documents_langflow( owner_name=anonymous_user.name, owner_email=anonymous_user.email, session_id=None, # No session for default documents - tweaks=default_tweaks, + tweaks={}, settings=None, # Use default ingestion settings delete_after_ingest=True, # Clean up after ingestion replace_duplicates=True, @@ -426,6 +415,8 @@ async def _ingest_default_documents_langflow( file_count=len(file_paths), ) + return task_id + async def health_check(request: Request): """Simple liveness probe: Indicates that the OpenRAG Backend service is online and running.""" return JSONResponse({"status": "ok"}, status_code=200) @@ -480,6 +471,8 @@ async def _ingest_default_documents_openrag( file_count=len(file_paths), ) + return task_id + async def _update_mcp_servers_with_provider_credentials(services): """Update MCP servers with provider credentials at startup. 
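Taken together, the main.py and settings.py hunks above thread the ingestion task id out to the API. A minimal sketch of the intended round trip, assuming the services are already constructed (the helper name and response fields come from the hunks above; the `services` holder is hypothetical):

    async def onboarding_sketch(services):
        # The ingestion helpers now return the id of the upload task they create,
        # and ingest_default_documents_when_ready() passes it through.
        task_id = await ingest_default_documents_when_ready(
            services.document_service,
            services.task_service,
            services.langflow_file_service,
            services.session_manager,
        )

        # The onboarding endpoint surfaces the id so the frontend can poll
        # exactly this task instead of every task in the system.
        return OnboardingResponse(
            message="Onboarding completed",
            edited=True,
            sample_data_ingested=True,
            task_id=task_id,
        )
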
diff --git a/src/models/processors.py b/src/models/processors.py index ec6eaaa18..9ad16f4fe 100644 --- a/src/models/processors.py +++ b/src/models/processors.py @@ -774,22 +774,6 @@ async def process_item( # Prepare metadata tweaks similar to API endpoint final_tweaks = self.tweaks.copy() if self.tweaks else {} - - metadata_tweaks = [] - if self.owner_user_id: - metadata_tweaks.append({"key": "owner", "value": self.owner_user_id}) - if self.owner_name: - metadata_tweaks.append({"key": "owner_name", "value": self.owner_name}) - if self.owner_email: - metadata_tweaks.append({"key": "owner_email", "value": self.owner_email}) - # Mark as local upload for connector_type - metadata_tweaks.append({"key": "connector_type", "value": "local"}) - - if metadata_tweaks: - # Initialize the OpenSearch component tweaks if not already present - if "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4" not in final_tweaks: - final_tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {} - final_tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"]["docs_metadata"] = metadata_tweaks # Process file using langflow service result = await self.langflow_file_service.upload_and_ingest_file( diff --git a/src/services/flows_service.py b/src/services/flows_service.py index e97ac2d3a..d770b47fc 100644 --- a/src/services/flows_service.py +++ b/src/services/flows_service.py @@ -6,28 +6,15 @@ LANGFLOW_URL, LANGFLOW_CHAT_FLOW_ID, LANGFLOW_INGEST_FLOW_ID, - OLLAMA_LLM_TEXT_COMPONENT_PATH, OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME, OPENAI_LLM_COMPONENT_DISPLAY_NAME, - WATSONX_LLM_TEXT_COMPONENT_PATH, clients, - WATSONX_LLM_COMPONENT_PATH, - WATSONX_EMBEDDING_COMPONENT_PATH, - OLLAMA_LLM_COMPONENT_PATH, - OLLAMA_EMBEDDING_COMPONENT_PATH, - WATSONX_EMBEDDING_COMPONENT_DISPLAY_NAME, - WATSONX_LLM_COMPONENT_DISPLAY_NAME, - OLLAMA_EMBEDDING_COMPONENT_DISPLAY_NAME, - OLLAMA_LLM_COMPONENT_DISPLAY_NAME, get_openrag_config, ) import json import os -import re -import copy from datetime import datetime from utils.logging_config import get_logger -from utils.container_utils import transform_localhost_url from utils.telemetry import TelemetryClient, Category, MessageId logger = get_logger(__name__) @@ -418,10 +405,6 @@ async def reset_langflow_flow(self, flow_type: str): llm_provider = config.agent.llm_provider.lower() embedding_provider = config.knowledge.embedding_provider.lower() - # Get provider-specific endpoint if needed - llm_provider_config = config.get_llm_provider_config() - endpoint = getattr(llm_provider_config, "endpoint", None) - # Step 2: Update model values for the specific flow being reset single_flow_config = [ { @@ -439,12 +422,10 @@ async def reset_langflow_flow(self, flow_type: str): if flow_type == "retrieval": # Retrieval flow uses both LLM and embedding models # Update LLM first - llm_endpoint = getattr(llm_provider_config, "endpoint", None) llm_result = await self.change_langflow_model_value( provider=llm_provider, embedding_model=None, llm_model=config.agent.llm_model, - endpoint=llm_endpoint, flow_configs=single_flow_config, ) if not llm_result.get("success"): @@ -453,13 +434,10 @@ async def reset_langflow_flow(self, flow_type: str): ) # Update embedding model - embedding_provider_config = config.get_embedding_provider_config() - embedding_endpoint = getattr(embedding_provider_config, "endpoint", None) embedding_result = await self.change_langflow_model_value( provider=embedding_provider, embedding_model=config.knowledge.embedding_model, llm_model=None, - 
endpoint=embedding_endpoint, flow_configs=single_flow_config, ) if not embedding_result.get("success"): @@ -475,23 +453,18 @@ async def reset_langflow_flow(self, flow_type: str): } elif flow_type in ["ingest", "url_ingest"]: # Ingest flows only need embedding model - embedding_provider_config = config.get_embedding_provider_config() - embedding_endpoint = getattr(embedding_provider_config, "endpoint", None) update_result = await self.change_langflow_model_value( provider=embedding_provider, embedding_model=config.knowledge.embedding_model, llm_model=None, - endpoint=embedding_endpoint, flow_configs=single_flow_config, ) else: # Other flows (nudges) only need LLM model - llm_endpoint = getattr(llm_provider_config, "endpoint", None) update_result = await self.change_langflow_model_value( provider=llm_provider, embedding_model=None, llm_model=config.agent.llm_model, - endpoint=llm_endpoint, flow_configs=single_flow_config, ) @@ -1107,7 +1080,6 @@ async def change_langflow_model_value( provider: str, embedding_model: str = None, llm_model: str = None, - endpoint: str = None, flow_configs: list = None, ): """ @@ -1126,12 +1098,9 @@ async def change_langflow_model_value( if provider not in ["watsonx", "ollama", "openai", "anthropic"]: raise ValueError("provider must be 'watsonx', 'ollama', 'openai', or 'anthropic'") - if provider == "watsonx" and not endpoint: - raise ValueError("endpoint is required for watsonx provider") - try: logger.info( - f"Changing dropdown values for provider {provider}, embedding: {embedding_model}, llm: {llm_model}, endpoint: {endpoint}" + f"Changing dropdown values for provider {provider}, embedding: {embedding_model}, llm: {llm_model}" ) # Use provided flow_configs or default to all flows @@ -1167,7 +1136,6 @@ async def change_langflow_model_value( provider, embedding_model, llm_model, - endpoint, ) results.append(result) logger.info( @@ -1190,7 +1158,6 @@ async def change_langflow_model_value( "provider": provider, "embedding_model": embedding_model, "llm_model": llm_model, - "endpoint": endpoint, "results": results, } @@ -1204,25 +1171,12 @@ async def change_langflow_model_value( "error": f"Failed to change provider models: {str(e)}", } - # def _get_provider_component_ids(self, provider: str): - # """Get the component IDs for a specific provider""" - # if provider == "watsonx": - # return WATSONX_EMBEDDING_COMPONENT_DISPLAY_NAME, WATSONX_LLM_COMPONENT_DISPLAY_NAME - # elif provider == "ollama": - # return OLLAMA_EMBEDDING_COMPONENT_DISPLAY_NAME, OLLAMA_LLM_COMPONENT_DISPLAY_NAME - # elif provider == "openai": - # # OpenAI components are the default ones - # return OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME, OPENAI_LLM_COMPONENT_DISPLAY_NAME - # else: - # raise ValueError(f"Unsupported provider: {provider}") - async def _update_provider_components( self, config, provider: str, embedding_model: str = None, llm_model: str = None, - endpoint: str = None, ): """Update provider components and their dropdown values in a flow""" flow_name = config["name"] @@ -1245,7 +1199,7 @@ async def _update_provider_components( embedding_node, _ = self._find_node_in_flow(flow_data, display_name=OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME) if embedding_node: if await self._update_component_fields( - embedding_node, provider, embedding_model, endpoint + embedding_node, provider, embedding_model ): updates_made.append(f"embedding model: {embedding_model}") @@ -1254,14 +1208,14 @@ async def _update_provider_components( llm_node, _ = self._find_node_in_flow(flow_data, 
display_name=OPENAI_LLM_COMPONENT_DISPLAY_NAME)
         if llm_node:
             if await self._update_component_fields(
-                llm_node, provider, llm_model, endpoint
+                llm_node, provider, llm_model
             ):
                 updates_made.append(f"llm model: {llm_model}")
 
             # Update Agent component (if exists in this flow)
             agent_node, _ = self._find_node_in_flow(flow_data, display_name=AGENT_COMPONENT_DISPLAY_NAME)
             if agent_node:
                 if await self._update_component_fields(
-                    agent_node, provider, llm_model, endpoint
+                    agent_node, provider, llm_model
                 ):
                     updates_made.append(f"agent model: {llm_model}")
@@ -1293,12 +1247,46 @@
             "flow_id": flow_id,
         }
 
+    async def _update_component_langflow(self, template, model: str):
+        # Call the custom_component/update endpoint to get an updated template.
+        # Only call it if the code field exists (custom components should have code).
+        if "code" in template and "value" in template["code"]:
+            code_value = template["code"]["value"]
+
+            try:
+                update_payload = {
+                    "code": code_value,
+                    "template": template,
+                    "field": "model",
+                    "field_value": model,
+                    "tool_mode": False,
+                }
+
+                response = await clients.langflow_request(
+                    "POST", "/api/v1/custom_component/update", json=update_payload
+                )
+
+                if response.status_code == 200:
+                    response_data = response.json()
+                    # Replace the working template with the one Langflow returns
+                    if "template" in response_data:
+                        return response_data["template"]
+                    else:
+                        logger.warning("Response from custom_component/update missing 'template' field")
+                else:
+                    logger.warning(
+                        f"Failed to call custom_component/update: HTTP {response.status_code} - {response.text}"
+                    )
+            except Exception as e:
+                logger.error(f"Error calling custom_component/update: {str(e)}")
+        # Fall back to the caller's template so this never returns None
+        return template
+
     async def _update_component_fields(
         self,
         component_node,
         provider: str,
         model_value: str,
-        endpoint: str = None,
     ):
         """Update fields in a component node based on provider and component type"""
         template = component_node.get("data", {}).get("node", {}).get("template", {})
 
         updated = False
 
-        provider_name = "IBM watsonx.ai" if provider == "watsonx" else "Ollama" if provider == "ollama" else "Anthropic" if provider == "anthropic" else "OpenAI"
+        provider_name = "IBM WatsonX" if provider == "watsonx" else "Ollama" if provider == "ollama" else "Anthropic" if provider == "anthropic" else "OpenAI"
 
-        field_name = "provider" if "provider" in template else "agent_llm"
-
-        # Update provider field and call custom_component/update endpoint
-        if field_name in template:
-            # First, update the provider value
-            template[field_name]["value"] = provider_name
+        try:
+            enable_payload = [{
+                "provider": provider_name,
+                "model_id": model_value,
+                "enabled": True
+            }]
 
-            # Call custom_component/update endpoint to get updated template
-            # Only call if code field exists (custom components should have code)
-            if "code" in template and "value" in template["code"]:
-                code_value = template["code"]["value"]
-                field_value = provider_name
-
-                try:
-                    update_payload = {
-                        "code": code_value,
-                        "template": template,
-                        "field": field_name,
-                        "field_value": field_value,
-                        "tool_mode": False,
-                    }
-
-                    response = await clients.langflow_request(
-                        "POST", "/api/v1/custom_component/update", json=update_payload
-                    )
-
-                    if response.status_code == 200:
-                        response_data = response.json()
-                        # Update template with the new template from response.data
-                        if "template" in response_data:
-                            # Update the template in
component_node - component_node["data"]["node"]["template"] = response_data["template"] - # Update local template reference - template = response_data["template"] - logger.info(f"Successfully updated template via custom_component/update for provider: {provider_name}") - else: - logger.warning("Response from custom_component/update missing 'data' field") - else: - logger.warning( - f"Failed to call custom_component/update: HTTP {response.status_code} - {response.text}" - ) - except Exception as e: - logger.error(f"Error calling custom_component/update: {str(e)}") - # Continue with manual updates even if API call fails + enable_response = await clients.langflow_request( + "POST", "/api/v1/models/enabled_models", json=enable_payload + ) - updated = True + if enable_response.status_code == 200: + logger.info(f"Successfully enabled model {model_value} for provider {provider_name}") + else: + logger.warning( + f"Failed to enable model: HTTP {enable_response.status_code} - {enable_response.text}" + ) + except Exception as e: + logger.error(f"Error enabling model {model_value}: {str(e)}") - - # Update model_name field (common to all providers) + # Update provider field and call custom_component/update endpoint if "model" in template: - template["model"]["value"] = model_value - template["model"]["options"] = [model_value] - template["model"]["advanced"] = False - updated = True - elif "model_name" in template: - template["model_name"]["value"] = model_value - template["model_name"]["options"] = [model_value] - template["model_name"]["advanced"] = False - updated = True + if "options" not in template["model"]: + return False - # Update endpoint/URL field based on provider - if endpoint: - if provider == "watsonx" and "base_url" in template: - # Watson uses "url" field - template["base_url"]["value"] = endpoint - template["base_url"]["options"] = [endpoint] - template["base_url"]["show"] = True - template["base_url"]["advanced"] = False - updated = True - if provider == "watsonx" and "base_url_ibm_watsonx" in template: - # Watson uses "url" field - template["base_url_ibm_watsonx"]["value"] = endpoint - template["base_url_ibm_watsonx"]["show"] = True - template["base_url_ibm_watsonx"]["advanced"] = False - updated = True - - if provider == "openai" and "api_key" in template: - template["api_key"]["value"] = "OPENAI_API_KEY" - template["api_key"]["load_from_db"] = True - template["api_key"]["show"] = True - template["api_key"]["advanced"] = False - updated = True - if provider == "openai" and "api_base" in template: - template["api_base"]["value"] = "" - template["api_base"]["load_from_db"] = False - template["api_base"]["show"] = True - template["api_base"]["advanced"] = False - updated = True + model = [template["model"]["options"][0]] - if provider == "anthropic" and "api_key" in template: - template["api_key"]["value"] = "ANTHROPIC_API_KEY" - template["api_key"]["load_from_db"] = True - template["api_key"]["show"] = True - template["api_key"]["advanced"] = False - updated = True - - if provider == "anthropic" and "base_url" in template: - template["base_url"]["value"] = "https://api.anthropic.com" - template["base_url"]["load_from_db"] = False - template["base_url"]["show"] = True - template["base_url"]["advanced"] = True - updated = True + template = await self._update_component_langflow(template, model_value) - if provider == "ollama" and "base_url" in template: - template["base_url"]["value"] = "OLLAMA_BASE_URL" - template["base_url"]["load_from_db"] = True - template["base_url"]["show"] = True 
- template["base_url"]["advanced"] = False - updated = True - - if provider == "ollama" and "api_base" in template: - template["api_base"]["value"] = "OLLAMA_BASE_URL" - template["api_base"]["load_from_db"] = True - template["api_base"]["show"] = True - template["api_base"]["advanced"] = False - updated = True + component_node["data"]["node"]["template"] = template + + model = [item for item in template["model"]["options"] if item["provider"] == provider_name and item["name"] == model_value] - if provider == "ollama" and "ollama_base_url" in template: - template["ollama_base_url"]["value"] = "OLLAMA_BASE_URL" - template["ollama_base_url"]["load_from_db"] = True - template["ollama_base_url"]["show"] = True - template["ollama_base_url"]["advanced"] = False - updated = True + template = await self._update_component_langflow(template, model_value) + + template["model"]["value"] = model + + component_node["data"]["node"]["template"] = template + + if len(model) == 0: + logger.warning(f"Model {model_value} not found for provider {provider_name}") + return False - if provider == "watsonx" and "project_id" in template: - template["project_id"]["value"] = "WATSONX_PROJECT_ID" - template["project_id"]["load_from_db"] = True - template["project_id"]["show"] = True - template["project_id"]["advanced"] = False updated = True - - if provider == "watsonx" and "api_key" in template: - template["api_key"]["value"] = "WATSONX_API_KEY" - template["api_key"]["load_from_db"] = True - template["api_key"]["show"] = True - template["api_key"]["advanced"] = False + + + if "api_base" in template: + if provider == "ollama": + template["api_base"]["value"] = "OLLAMA_BASE_URL" + template["api_base"]["load_from_db"] = True + elif provider == "watsonx": + # Watson uses "url" field + template["api_base"]["value"] = "WATSONX_URL" + template["api_base"]["load_from_db"] = True + else: + template["api_base"]["value"] = "" + template["api_base"]["load_from_db"] = False updated = True + if "project_id" in template: + if provider == "watsonx": + template["project_id"]["value"] = "WATSONX_PROJECT_ID" + template["project_id"]["load_from_db"] = True + else: + template["project_id"]["value"] = "" + template["project_id"]["load_from_db"] = False + updated = True return updated diff --git a/src/services/langflow_file_service.py b/src/services/langflow_file_service.py index ac02be607..c1732489b 100644 --- a/src/services/langflow_file_service.py +++ b/src/services/langflow_file_service.py @@ -93,16 +93,6 @@ async def run_ingestion_flow( # Pass files via tweaks to File component (File-PSU37 from the flow) if file_paths: tweaks["DoclingRemote-Dp3PX"] = {"path": file_paths} - - - - # Pass JWT token via tweaks using the x-langflow-global-var- pattern - if jwt_token: - # Using the global variable pattern that Langflow expects for OpenSearch components - tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {"jwt_token": jwt_token} - logger.debug("[LF] Added JWT token to tweaks for OpenSearch components") - else: - logger.warning("[LF] No JWT token provided") # Pass metadata via tweaks to OpenSearch component metadata_tweaks = [] @@ -115,14 +105,7 @@ async def run_ingestion_flow( if connector_type: metadata_tweaks.append({"key": "connector_type", "value": connector_type}) logger.info(f"[LF] Metadata tweaks {metadata_tweaks}") - # if metadata_tweaks: - # # Initialize the OpenSearch component tweaks if not already present - # if "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4" not in tweaks: - # 
tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {} - # tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"]["docs_metadata"] = metadata_tweaks - # logger.debug( - # "[LF] Added metadata to tweaks", metadata_count=len(metadata_tweaks) - # ) + if tweaks: payload["tweaks"] = tweaks logger.debug(f"[LF] Tweaks {tweaks}") diff --git a/src/services/search_service.py b/src/services/search_service.py index ef88bb9d4..a2cb3c517 100644 --- a/src/services/search_service.py +++ b/src/services/search_service.py @@ -330,7 +330,7 @@ async def embed_with_model(model_name): search_body = { "query": query_block, "aggs": { - "data_sources": {"terms": {"field": "filename.keyword", "size": 20}}, + "data_sources": {"terms": {"field": "filename", "size": 20}}, "document_types": {"terms": {"field": "mimetype", "size": 10}}, "owners": {"terms": {"field": "owner_name.keyword", "size": 10}}, "connector_types": {"terms": {"field": "connector_type", "size": 10}}, diff --git a/src/utils/container_utils.py b/src/utils/container_utils.py index 746379e83..f0f14ccbb 100644 --- a/src/utils/container_utils.py +++ b/src/utils/container_utils.py @@ -102,7 +102,7 @@ def get_container_host() -> str | None: return None -def transform_localhost_url(url: str) -> str: +def transform_localhost_url(url: str, is_langflow: bool = False, is_podman: bool = True) -> str: """Transform localhost URLs to container-accessible hosts when running in a container. Automatically detects if running inside a container and finds the appropriate host @@ -113,6 +113,8 @@ def transform_localhost_url(url: str) -> str: Args: url: The original URL + is_langflow: Detect the container host for the langflow container, not the current one, if True. + is_podman: Use host.containers.internal instead of host.docker.internal if True. Returns: Transformed URL with container-accessible host if applicable, otherwise the original URL. 
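The new flags make the substitution target explicit instead of auto-detected, which also means the URL is rewritten even when the caller itself is not running in a container. A worked example of the intended behavior, based on the hunk that follows (the Ollama port is made up for illustration):

    transform_localhost_url("http://localhost:11434")
    # -> host auto-detected via get_container_host(); returned unchanged outside a container

    transform_localhost_url("http://localhost:11434", is_langflow=True)
    # -> "http://host.containers.internal:11434" (is_podman defaults to True)

    transform_localhost_url("http://localhost:11434", is_langflow=True, is_podman=False)
    # -> "http://host.docker.internal:11434"
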
@@ -123,7 +125,10 @@
         # Returns "http://172.17.0.1:5001" if running in Docker on Linux (gateway IP fallback)
         # Returns "http://localhost:5001" if not in a container
     """
-    container_host = get_container_host()
+    if is_langflow:
+        container_host = "host.containers.internal" if is_podman else "host.docker.internal"
+    else:
+        container_host = get_container_host()
 
     if not container_host:
         return url
diff --git a/src/utils/langflow_headers.py b/src/utils/langflow_headers.py
index e3447e611..bc8dac48f 100644
--- a/src/utils/langflow_headers.py
+++ b/src/utils/langflow_headers.py
@@ -28,7 +28,7 @@ def add_provider_credentials_to_headers(headers: Dict[str, str], config) -> None
 
     # Add Ollama endpoint (with localhost transformation)
     if config.providers.ollama.endpoint:
-        ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint)
+        ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint, is_langflow=True)
         headers["X-LANGFLOW-GLOBAL-VAR-OLLAMA_BASE_URL"] = str(ollama_endpoint)
 
 
@@ -60,7 +60,7 @@ def build_mcp_global_vars_from_config(config) -> Dict[str, str]:
 
     # Add Ollama endpoint (with localhost transformation)
     if config.providers.ollama.endpoint:
-        ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint)
+        ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint, is_langflow=True)
         global_vars["OLLAMA_BASE_URL"] = ollama_endpoint
 
     # Add selected embedding model

From 226845f24d6424fc86b499730c0be00d49dbe49a Mon Sep 17 00:00:00 2001
From: Rico Furtado
Date: Mon, 9 Mar 2026 19:11:10 -0400
Subject: [PATCH 34/43] fix: Refactor EnvManager to use class-level assignment
 pattern for environment variable matching

---
 src/tui/managers/env_manager.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/tui/managers/env_manager.py b/src/tui/managers/env_manager.py
index ad467d6bd..a349ae04f 100644
--- a/src/tui/managers/env_manager.py
+++ b/src/tui/managers/env_manager.py
@@ -97,6 +97,8 @@ class EnvConfig:
 class EnvManager:
     """Manages environment configuration for OpenRAG."""
 
+    assignment_pattern = re.compile(r"^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=")
+
     def __init__(self, env_file: Optional[Path] = None):
         if env_file:
             self.env_file = env_file
@@ -120,8 +122,7 @@ def __init__(self, env_file: Optional[Path] = None):
                 logger.warning(f"Failed to migrate .env file: {e}")
 
         self.config = EnvConfig()
-        self.assignment_pattern = re.compile(r"^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=")
-
+
     def generate_secure_password(self) -> str:
         """Generate a secure password for OpenSearch."""
         # Ensure at least one character from each category
@@ -218,7 +219,7 @@ def _collect_preserved_env_lines(self) -> list[str]:
             if not stripped or stripped.startswith("#"):
                 continue
 
-            match = self.assignment_pattern.match(raw_line)
+            match = EnvManager.assignment_pattern.match(raw_line)
             if not match:
                 continue

From 2d542b45d863d476d382668865e030d8e17afbb9 Mon Sep 17 00:00:00 2001
From: Rico Furtado
Date: Mon, 9 Mar 2026 19:33:28 -0400
Subject: [PATCH 35/43] Update tests/unit/test_env_manager.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 tests/unit/test_env_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/unit/test_env_manager.py b/tests/unit/test_env_manager.py
index 49dfcfe08..e07eb51d8 100644
--- a/tests/unit/test_env_manager.py
+++ b/tests/unit/test_env_manager.py
@@ -160,7 +160,7 @@ def test_preserves_unmanaged_continued_line(self,
env_manager, tmp_path): assert continued_block in content assert content.count("UNMANAGED_LONG_VALUE=") == 1 # Managed password should be updated, not duplicated. - assert 'OPENSEARCH_PASSWORD="AnotherNewPass!789"' in content + assert "OPENSEARCH_PASSWORD='AnotherNewPass!789'" in content assert 'OPENSEARCH_PASSWORD="old-password"' not in content # --------------------------------------------------------------------------- # ensure_openrag_version From b971c4b5a9961b8b712f9bc9be401aaac66c89c9 Mon Sep 17 00:00:00 2001 From: matano Date: Tue, 10 Mar 2026 12:03:05 +0200 Subject: [PATCH 36/43] feat(sdk): add onboarding endpoint support for embedding model initialization --- sdks/python/openrag_sdk/__init__.py | 2 ++ sdks/python/openrag_sdk/client.py | 26 ++++++++++++++++++++++++++ sdks/python/openrag_sdk/models.py | 8 ++++++++ 3 files changed, 36 insertions(+) diff --git a/sdks/python/openrag_sdk/__init__.py b/sdks/python/openrag_sdk/__init__.py index 287067cf7..a0019312b 100644 --- a/sdks/python/openrag_sdk/__init__.py +++ b/sdks/python/openrag_sdk/__init__.py @@ -57,6 +57,7 @@ KnowledgeFilterSearchResponse, KnowledgeSettings, Message, + OnboardingResponse, SearchFilters, SearchResponse, SearchResult, @@ -105,6 +106,7 @@ "SettingsUpdateResponse", "AgentSettings", "KnowledgeSettings", + "OnboardingResponse", # Knowledge filter models "KnowledgeFilter", "KnowledgeFilterQueryData", diff --git a/sdks/python/openrag_sdk/client.py b/sdks/python/openrag_sdk/client.py index 2f5cf4dc1..f05b1a6b1 100644 --- a/sdks/python/openrag_sdk/client.py +++ b/sdks/python/openrag_sdk/client.py @@ -85,6 +85,31 @@ async def update(self, options): return SettingsUpdateResponse(message=data.get("message", "Settings updated")) +class OnboardingClient: + """Client for onboarding operations.""" + + def __init__(self, client: "OpenRAGClient"): + self._client = client + + async def onboarding(self, embedding_model: str): + """ + Initialize OpenRAG with embedding model configuration. + + Args: + embedding_model: Embedding model identifier (e.g., 'text-embedding-3-small'). + + Returns: + OnboardingResponse with success message. + """ + from .models import OnboardingResponse + + body: dict[str, Any] = {"embedding_model": embedding_model} + + response = await self._client._request("POST", "/onboarding", json=body) + data = response.json() + return OnboardingResponse(**data) + + class OpenRAGClient: """ OpenRAG API client. 
@@ -159,6 +184,7 @@ def __init__( self.settings = SettingsClient(self) self.models = ModelsClient(self) self.knowledge_filters = KnowledgeFiltersClient(self) + self.onboarding = OnboardingClient(self) @property def _headers(self) -> dict[str, str]: diff --git a/sdks/python/openrag_sdk/models.py b/sdks/python/openrag_sdk/models.py index 94530f4e2..0a81bbd1a 100644 --- a/sdks/python/openrag_sdk/models.py +++ b/sdks/python/openrag_sdk/models.py @@ -285,3 +285,11 @@ class DeleteKnowledgeFilterResponse(BaseModel): success: bool error: str | None = None + + +# Onboarding models +class OnboardingResponse(BaseModel): + """Response from onboarding endpoint.""" + + message: str + success: bool = True From 23e995b6b4acafc1a66e72aac90d3ef92a6ea120 Mon Sep 17 00:00:00 2001 From: matano Date: Tue, 10 Mar 2026 14:21:07 +0200 Subject: [PATCH 37/43] feat: add v1 onboarding endpoint with SDK client support --- sdks/python/openrag_sdk/client.py | 2 +- src/api/v1/onboarding.py | 45 +++++++++++++++++++++++++++++++ src/main.py | 5 +++- 3 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 src/api/v1/onboarding.py diff --git a/sdks/python/openrag_sdk/client.py b/sdks/python/openrag_sdk/client.py index f05b1a6b1..843472545 100644 --- a/sdks/python/openrag_sdk/client.py +++ b/sdks/python/openrag_sdk/client.py @@ -105,7 +105,7 @@ async def onboarding(self, embedding_model: str): body: dict[str, Any] = {"embedding_model": embedding_model} - response = await self._client._request("POST", "/onboarding", json=body) + response = await self._client._request("POST", "/api/v1/onboarding", json=body) data = response.json() return OnboardingResponse(**data) diff --git a/src/api/v1/onboarding.py b/src/api/v1/onboarding.py new file mode 100644 index 000000000..a120a5cc6 --- /dev/null +++ b/src/api/v1/onboarding.py @@ -0,0 +1,45 @@ +""" +Public API v1 Onboarding endpoint. + +Provides onboarding configuration setup. +Uses API key authentication. +""" +from fastapi import Depends + +from api.settings import OnboardingBody +from dependencies import ( + get_api_key_user_async, + get_document_service, + get_flows_service, + get_knowledge_filter_service, + get_langflow_file_service, + get_session_manager, + get_task_service, +) +from session_manager import User + + +async def onboarding_endpoint( + body: OnboardingBody, + flows_service=Depends(get_flows_service), + session_manager=Depends(get_session_manager), + document_service=Depends(get_document_service), + task_service=Depends(get_task_service), + langflow_file_service=Depends(get_langflow_file_service), + knowledge_filter_service=Depends(get_knowledge_filter_service), + user: User = Depends(get_api_key_user_async), +): + """Initialize OpenRAG with configuration. 
POST /v1/onboarding""" + from api.settings import onboarding + + return await onboarding( + body=body, + flows_service=flows_service, + session_manager=session_manager, + document_service=document_service, + task_service=task_service, + langflow_file_service=langflow_file_service, + knowledge_filter_service=knowledge_filter_service, + user=user, + ) + diff --git a/src/main.py b/src/main.py index 0443d6823..1c78eb030 100644 --- a/src/main.py +++ b/src/main.py @@ -40,7 +40,7 @@ from api.connector_router import ConnectorRouter from services.api_key_service import APIKeyService from api import keys as api_keys -from api.v1 import chat as v1_chat, search as v1_search, documents as v1_documents, settings as v1_settings, models as v1_models, knowledge_filters as v1_knowledge_filters +from api.v1 import chat as v1_chat, search as v1_search, documents as v1_documents, settings as v1_settings, models as v1_models, knowledge_filters as v1_knowledge_filters, onboarding as v1_onboarding # Configuration and setup from config.settings import ( @@ -851,6 +851,9 @@ async def create_app(): app.add_api_route("/v1/knowledge-filters/{filter_id}", v1_knowledge_filters.update_endpoint, methods=["PUT"], tags=["public"]) app.add_api_route("/v1/knowledge-filters/{filter_id}", v1_knowledge_filters.delete_endpoint, methods=["DELETE"], tags=["public"]) + # Onboarding endpoint + app.add_api_route("/v1/onboarding", v1_onboarding.onboarding_endpoint, methods=["POST"], tags=["public"]) + # Add startup event handler @app.on_event("startup") async def startup_event(): From 89c6354aef7f7153d504580a97c2009b210601ff Mon Sep 17 00:00:00 2001 From: Mike Pawlowski Date: Mon, 2 Mar 2026 15:55:01 -0800 Subject: [PATCH 38/43] feat: Settings UX should let you remove a model provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Issues: - #833 Summary Adds the ability for users to remove a configured model provider (OpenAI, Anthropic, IBM watsonx.ai, Ollama) from the Settings UI, with backend validation and automatic fallback to another configured provider. 
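
To make the new request shape concrete, a minimal sketch of a removal call
(illustrative only: the route path, HTTP method, and auth header are
assumptions; only the remove_*_config field names come from
SettingsUpdateBody below):

    import httpx

    # Hypothetical call against a local deployment; adjust the path and
    # auth to match how update_settings is actually mounted.
    resp = httpx.patch(
        "http://localhost:8000/api/settings",
        json={"remove_openai_config": True},
        headers={"Authorization": "Bearer <api-key>"},
    )
    # The backend rejects the request with a 400 when no other provider is
    # configured; otherwise it clears the credentials and falls back to the
    # first remaining configured provider.
    print(resp.status_code, resp.json())
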
Frontend – Settings Dialogs - Added a "Remove" button to the Ollama, OpenAI, Anthropic, and watsonx settings dialogs, each gated by a tooltip explaining when removal is disabled - Added an inline confirmation footer (with animated slide-in) that appears before executing a destructive remove action, replacing the dialog close behavior - Disabled the remove action when no other provider is configured (prevents being locked out), with a tooltip explaining the reason - Displayed an inline error message from the mutation if the remove request fails Frontend – Mutation Layer - Extended useUpdateSettingsMutation to support the new remove_*_config boolean fields (remove_ollama_config, remove_openai_config, remove_anthropic_config, remove_watsonx_config) Backend – Settings API - Added remove_ollama_config, remove_openai_config, remove_anthropic_config, remove_watsonx_config optional boolean fields to SettingsUpdateBody - Implemented removal logic for each provider: clears credentials/endpoint, marks configured = False, and rejects the request with a 400 if no other provider is available - Added _first_configured_llm_provider and _first_configured_embedding_provider helpers to auto-select a fallback provider/model when the active LLM or embedding provider is removed - Ensured Langflow global variables and model values are refreshed after a provider removal (same as a provider update) --- .../mutations/useUpdateSettingsMutation.ts | 4 + .../_components/anthropic-settings-dialog.tsx | 134 +++++++++++++++--- .../_components/ollama-settings-dialog.tsx | 125 +++++++++++++--- .../_components/openai-settings-dialog.tsx | 133 ++++++++++++++--- .../_components/watsonx-settings-dialog.tsx | 133 ++++++++++++++--- src/api/settings.py | 119 +++++++++++++++- 6 files changed, 565 insertions(+), 83 deletions(-) diff --git a/frontend/app/api/mutations/useUpdateSettingsMutation.ts b/frontend/app/api/mutations/useUpdateSettingsMutation.ts index e1a7e9ddf..aa5d933aa 100644 --- a/frontend/app/api/mutations/useUpdateSettingsMutation.ts +++ b/frontend/app/api/mutations/useUpdateSettingsMutation.ts @@ -34,6 +34,10 @@ export interface UpdateSettingsRequest { watsonx_endpoint?: string; watsonx_project_id?: string; ollama_endpoint?: string; + remove_ollama_config?: boolean; + remove_openai_config?: boolean; + remove_anthropic_config?: boolean; + remove_watsonx_config?: boolean; } export interface UpdateSettingsResponse { diff --git a/frontend/app/settings/_components/anthropic-settings-dialog.tsx b/frontend/app/settings/_components/anthropic-settings-dialog.tsx index 2cc1e2232..55bc4d9a2 100644 --- a/frontend/app/settings/_components/anthropic-settings-dialog.tsx +++ b/frontend/app/settings/_components/anthropic-settings-dialog.tsx @@ -6,6 +6,7 @@ import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; import { useGetAnthropicModelsQuery } from "@/app/api/queries/useGetModelsQuery"; +import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealthQuery"; import AnthropicLogo from "@/components/icons/anthropic-logo"; import { Button } from "@/components/ui/button"; @@ -16,6 +17,13 @@ import { DialogHeader, DialogTitle, } from "@/components/ui/dialog"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { useAuth } from "@/contexts/auth-context"; 
import { AnthropicSettingsForm, type AnthropicSettingsFormData, @@ -28,11 +36,26 @@ const AnthropicSettingsDialog = ({ open: boolean; setOpen: (open: boolean) => void; }) => { + const { isAuthenticated, isNoAuthMode } = useAuth(); const queryClient = useQueryClient(); const [isValidating, setIsValidating] = useState(false); const [validationError, setValidationError] = useState(null); + const [showRemoveConfirm, setShowRemoveConfirm] = useState(false); const router = useRouter(); + const { data: settings = {} } = useGetSettingsQuery({ + enabled: isAuthenticated || isNoAuthMode, + }); + + const isAnthropicConfigured = + settings.providers?.anthropic?.configured === true; + + const canRemoveAnthropic = + isAnthropicConfigured && + (settings.providers?.openai?.configured === true || + settings.providers?.watsonx?.configured === true || + settings.providers?.ollama?.configured === true); + const methods = useForm({ mode: "onSubmit", defaultValues: { @@ -78,6 +101,14 @@ const AnthropicSettingsDialog = ({ }, }); + const removeMutation = useUpdateSettingsMutation({ + onSuccess: () => { + toast.success("Anthropic configuration removed"); + setShowRemoveConfirm(false); + setOpen(false); + }, + }); + const onSubmit = async (data: AnthropicSettingsFormData) => { // Clear any previous validation errors setValidationError(null); @@ -108,7 +139,7 @@ const AnthropicSettingsDialog = ({ }; return ( - + { setShowRemoveConfirm(false); setOpen(o); }}>
@@ -139,26 +170,89 @@ const AnthropicSettingsDialog = ({

)} + {removeMutation.isError && ( + +

+ {removeMutation.error?.message} +

+
+ )} - - - - + + {showRemoveConfirm ? ( + +
+ Remove configuration? +
+ + +
+ ) : ( + + {isAnthropicConfigured && ( + + + + + + + + {!canRemoveAnthropic && ( + + Configure another model provider before removing + Anthropic + + )} + + + )} + + + + )}
diff --git a/frontend/app/settings/_components/ollama-settings-dialog.tsx b/frontend/app/settings/_components/ollama-settings-dialog.tsx index e0759600e..d079a0a9a 100644 --- a/frontend/app/settings/_components/ollama-settings-dialog.tsx +++ b/frontend/app/settings/_components/ollama-settings-dialog.tsx @@ -17,6 +17,12 @@ import { DialogHeader, DialogTitle, } from "@/components/ui/dialog"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip"; import { useAuth } from "@/contexts/auth-context"; import { OllamaSettingsForm, @@ -34,6 +40,7 @@ const OllamaSettingsDialog = ({ const queryClient = useQueryClient(); const [isValidating, setIsValidating] = useState(false); const [validationError, setValidationError] = useState(null); + const [showRemoveConfirm, setShowRemoveConfirm] = useState(false); const router = useRouter(); const { data: settings = {} } = useGetSettingsQuery({ @@ -42,6 +49,13 @@ const OllamaSettingsDialog = ({ const isOllamaConfigured = settings.providers?.ollama?.configured === true; + const otherProviderConfigured = + settings.providers?.openai?.configured === true || + settings.providers?.anthropic?.configured === true || + settings.providers?.watsonx?.configured === true; + + const canRemoveOllama = isOllamaConfigured && otherProviderConfigured; + const methods = useForm({ mode: "onSubmit", defaultValues: { @@ -90,6 +104,14 @@ const OllamaSettingsDialog = ({ }, }); + const removeMutation = useUpdateSettingsMutation({ + onSuccess: () => { + toast.success("Ollama configuration removed"); + setShowRemoveConfirm(false); + setOpen(false); + }, + }); + const onSubmit = async (data: OllamaSettingsFormData) => { // Clear any previous validation errors setValidationError(null); @@ -110,7 +132,7 @@ const OllamaSettingsDialog = ({ }; return ( - + { setShowRemoveConfirm(false); setOpen(o); }}>
@@ -141,26 +163,89 @@ const OllamaSettingsDialog = ({

)} + {removeMutation.isError && ( + +

+ {removeMutation.error?.message} +

+
+ )} - - - - + + {showRemoveConfirm ? ( + +
+ Remove configuration? +
+ + +
+ ) : ( + + {isOllamaConfigured && ( + + + + + + + + {!canRemoveOllama && ( + + Configure another model provider before removing + Ollama + + )} + + + )} + + + + )}
diff --git a/frontend/app/settings/_components/openai-settings-dialog.tsx b/frontend/app/settings/_components/openai-settings-dialog.tsx index 6d2ba8736..5da4225d3 100644 --- a/frontend/app/settings/_components/openai-settings-dialog.tsx +++ b/frontend/app/settings/_components/openai-settings-dialog.tsx @@ -6,6 +6,7 @@ import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; import { useGetOpenAIModelsQuery } from "@/app/api/queries/useGetModelsQuery"; +import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealthQuery"; import OpenAILogo from "@/components/icons/openai-logo"; import { Button } from "@/components/ui/button"; @@ -16,6 +17,13 @@ import { DialogHeader, DialogTitle, } from "@/components/ui/dialog"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { useAuth } from "@/contexts/auth-context"; import { OpenAISettingsForm, type OpenAISettingsFormData, @@ -28,11 +36,25 @@ const OpenAISettingsDialog = ({ open: boolean; setOpen: (open: boolean) => void; }) => { + const { isAuthenticated, isNoAuthMode } = useAuth(); const queryClient = useQueryClient(); const [isValidating, setIsValidating] = useState(false); const [validationError, setValidationError] = useState(null); + const [showRemoveConfirm, setShowRemoveConfirm] = useState(false); const router = useRouter(); + const { data: settings = {} } = useGetSettingsQuery({ + enabled: isAuthenticated || isNoAuthMode, + }); + + const isOpenAIConfigured = settings.providers?.openai?.configured === true; + + const canRemoveOpenAI = + isOpenAIConfigured && + (settings.providers?.anthropic?.configured === true || + settings.providers?.watsonx?.configured === true || + settings.providers?.ollama?.configured === true); + const methods = useForm({ mode: "onSubmit", defaultValues: { @@ -79,6 +101,14 @@ const OpenAISettingsDialog = ({ }, }); + const removeMutation = useUpdateSettingsMutation({ + onSuccess: () => { + toast.success("OpenAI configuration removed"); + setShowRemoveConfirm(false); + setOpen(false); + }, + }); + const onSubmit = async (data: OpenAISettingsFormData) => { // Clear any previous validation errors setValidationError(null); @@ -109,7 +139,7 @@ const OpenAISettingsDialog = ({ }; return ( - + { setShowRemoveConfirm(false); setOpen(o); }}>
@@ -140,26 +170,89 @@ const OpenAISettingsDialog = ({

)} + {removeMutation.isError && ( + +

+ {removeMutation.error?.message} +

+
+ )} - - - - + + {showRemoveConfirm ? ( + +
+ Remove configuration? +
+ + +
+ ) : ( + + {isOpenAIConfigured && ( + + + + + + + + {!canRemoveOpenAI && ( + + Configure another model provider before removing + OpenAI + + )} + + + )} + + + + )}
diff --git a/frontend/app/settings/_components/watsonx-settings-dialog.tsx b/frontend/app/settings/_components/watsonx-settings-dialog.tsx index 1c781272a..2c0e7d557 100644 --- a/frontend/app/settings/_components/watsonx-settings-dialog.tsx +++ b/frontend/app/settings/_components/watsonx-settings-dialog.tsx @@ -6,6 +6,7 @@ import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; import { useGetIBMModelsQuery } from "@/app/api/queries/useGetModelsQuery"; +import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealthQuery"; import IBMLogo from "@/components/icons/ibm-logo"; import { Button } from "@/components/ui/button"; @@ -16,6 +17,13 @@ import { DialogHeader, DialogTitle, } from "@/components/ui/dialog"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { useAuth } from "@/contexts/auth-context"; import { WatsonxSettingsForm, type WatsonxSettingsFormData, @@ -28,11 +36,25 @@ const WatsonxSettingsDialog = ({ open: boolean; setOpen: (open: boolean) => void; }) => { + const { isAuthenticated, isNoAuthMode } = useAuth(); const queryClient = useQueryClient(); const [isValidating, setIsValidating] = useState(false); const [validationError, setValidationError] = useState(null); + const [showRemoveConfirm, setShowRemoveConfirm] = useState(false); const router = useRouter(); + const { data: settings = {} } = useGetSettingsQuery({ + enabled: isAuthenticated || isNoAuthMode, + }); + + const isWatsonxConfigured = settings.providers?.watsonx?.configured === true; + + const canRemoveWatsonx = + isWatsonxConfigured && + (settings.providers?.openai?.configured === true || + settings.providers?.anthropic?.configured === true || + settings.providers?.ollama?.configured === true); + const methods = useForm({ mode: "onSubmit", defaultValues: { @@ -85,6 +107,14 @@ const WatsonxSettingsDialog = ({ }, }); + const removeMutation = useUpdateSettingsMutation({ + onSuccess: () => { + toast.success("IBM watsonx.ai configuration removed"); + setShowRemoveConfirm(false); + setOpen(false); + }, + }); + const onSubmit = async (data: WatsonxSettingsFormData) => { // Clear any previous validation errors setValidationError(null); @@ -118,7 +148,7 @@ const WatsonxSettingsDialog = ({ }; return ( - + { setShowRemoveConfirm(false); setOpen(o); }}>
@@ -149,26 +179,89 @@ const WatsonxSettingsDialog = ({

)} + {removeMutation.isError && ( + +

+ {removeMutation.error?.message} +

+
+ )} - - - - + + {showRemoveConfirm ? ( + +
+ Remove configuration? +
+ + +
+ ) : ( + + {isWatsonxConfigured && ( + + + + + + + + {!canRemoveWatsonx && ( + + Configure another model provider before removing IBM + watsonx.ai + + )} + + + )} + + + + )}
diff --git a/src/api/settings.py b/src/api/settings.py index 6b6c31607..c89695311 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -55,6 +55,10 @@ class SettingsUpdateBody(BaseModel): watsonx_endpoint: Optional[str] = Field(None, min_length=1) watsonx_project_id: Optional[str] = Field(None, min_length=1) ollama_endpoint: Optional[str] = Field(None, min_length=1) + remove_ollama_config: Optional[bool] = None + remove_openai_config: Optional[bool] = None + remove_anthropic_config: Optional[bool] = None + remove_watsonx_config: Optional[bool] = None class OnboardingBody(BaseModel): @@ -361,6 +365,22 @@ async def get_settings( ) +def _first_configured_llm_provider(config, excluding: str) -> str: + """Return the first configured LLM provider that isn't `excluding`.""" + for p in ["openai", "anthropic", "watsonx", "ollama"]: + if p != excluding and getattr(config.providers, p).configured: + return p + return "openai" + + +def _first_configured_embedding_provider(config, excluding: str) -> str: + """Return the first configured embedding provider (openai/watsonx/ollama) that isn't `excluding`.""" + for p in ["openai", "watsonx", "ollama"]: + if p != excluding and getattr(config.providers, p).configured: + return p + return "openai" + + async def update_settings( body: SettingsUpdateBody, session_manager=Depends(get_session_manager), @@ -673,6 +693,99 @@ async def update_settings( config_updated = True provider_updated = True + if body.remove_ollama_config: + other_providers_configured = ( + current_config.providers.openai.configured + or current_config.providers.anthropic.configured + or current_config.providers.watsonx.configured + ) + if not other_providers_configured: + return JSONResponse( + {"error": "Cannot remove Ollama configuration: configure another model provider first."}, + status_code=400, + ) + current_config.providers.ollama.endpoint = "" + current_config.providers.ollama.configured = False + if current_config.agent.llm_provider == "ollama": + current_config.agent.llm_provider = _first_configured_llm_provider(current_config, "ollama") + current_config.agent.llm_model = "" + if current_config.knowledge.embedding_provider == "ollama": + current_config.knowledge.embedding_provider = _first_configured_embedding_provider(current_config, "ollama") + current_config.knowledge.embedding_model = "" + config_updated = True + provider_updated = True + + if body.remove_openai_config: + other_providers_configured = ( + current_config.providers.anthropic.configured + or current_config.providers.watsonx.configured + or current_config.providers.ollama.configured + ) + if not other_providers_configured: + return JSONResponse( + {"error": "Cannot remove OpenAI configuration: configure another model provider first."}, + status_code=400, + ) + current_config.providers.openai.api_key = "" + current_config.providers.openai.configured = False + if current_config.agent.llm_provider == "openai": + fb = _first_configured_llm_provider(current_config, "openai") + current_config.agent.llm_provider = fb + current_config.agent.llm_model = "" + if current_config.knowledge.embedding_provider == "openai": + fb = _first_configured_embedding_provider(current_config, "openai") + current_config.knowledge.embedding_provider = fb + current_config.knowledge.embedding_model = "" + config_updated = True + provider_updated = True + + if body.remove_anthropic_config: + other_providers_configured = ( + current_config.providers.openai.configured + or current_config.providers.watsonx.configured + or 
current_config.providers.ollama.configured + ) + if not other_providers_configured: + return JSONResponse( + {"error": "Cannot remove Anthropic configuration: configure another model provider first."}, + status_code=400, + ) + current_config.providers.anthropic.api_key = "" + current_config.providers.anthropic.configured = False + if current_config.agent.llm_provider == "anthropic": + fb = _first_configured_llm_provider(current_config, "anthropic") + current_config.agent.llm_provider = fb + current_config.agent.llm_model = "" + # Anthropic is not a valid embedding provider; no embedding reset needed + config_updated = True + provider_updated = True + + if body.remove_watsonx_config: + other_providers_configured = ( + current_config.providers.openai.configured + or current_config.providers.anthropic.configured + or current_config.providers.ollama.configured + ) + if not other_providers_configured: + return JSONResponse( + {"error": "Cannot remove IBM watsonx.ai configuration: configure another model provider first."}, + status_code=400, + ) + current_config.providers.watsonx.api_key = "" + current_config.providers.watsonx.endpoint = "" + current_config.providers.watsonx.project_id = "" + current_config.providers.watsonx.configured = False + if current_config.agent.llm_provider == "watsonx": + fb = _first_configured_llm_provider(current_config, "watsonx") + current_config.agent.llm_provider = fb + current_config.agent.llm_model = "" + if current_config.knowledge.embedding_provider == "watsonx": + fb = _first_configured_embedding_provider(current_config, "watsonx") + current_config.knowledge.embedding_provider = fb + current_config.knowledge.embedding_model = "" + config_updated = True + provider_updated = True + if provider_updated: await TelemetryClient.send_event( Category.SETTINGS_OPERATIONS, @@ -693,7 +806,7 @@ async def update_settings( # Update Langflow global variables and model values if provider settings changed await clients.refresh_patched_client() - if should_validate: + if should_validate or provider_updated: try: flows_service = _get_flows_service() @@ -706,8 +819,8 @@ async def update_settings( current_config, session_manager ) - # Update model values if provider or model changed - if body.llm_provider is not None or body.llm_model is not None or body.embedding_provider is not None or body.embedding_model is not None: + # Update model values if provider or model changed (including removals that trigger fallback) + if body.llm_provider is not None or body.llm_model is not None or body.embedding_provider is not None or body.embedding_model is not None or provider_updated: await _update_langflow_model_values(current_config, flows_service) except Exception as e: From cc9e953a1e772fbe04d5ad4034f52c1fa40cea6d Mon Sep 17 00:00:00 2001 From: Mike Pawlowski Date: Mon, 9 Mar 2026 18:56:50 -0700 Subject: [PATCH 39/43] feat: Settings UX should let you remove a model provider Issues: - #833 Summary Polished settings dialogs: form reset on open, autocomplete suppression, and label cleanup Form State & UX Fixes - Added useEffect to reset form state whenever a settings dialog is opened, preventing stale values from persisting across dialog sessions (Anthropic, OpenAI, Ollama, WatsonX) Password Field Improvements - Added autoComplete="new-password" to API key inputs in Anthropic, OpenAI, and WatsonX settings forms to suppress unwanted browser autofill suggestions Button Label Cleanup - Shortened the remove confirmation button label from "Confirm Remove" to "Remove" across all four provider 
dialogs (Anthropic, OpenAI, Ollama, WatsonX) --- .../settings/_components/anthropic-settings-dialog.tsx | 9 +++++++-- .../app/settings/_components/anthropic-settings-form.tsx | 1 + .../app/settings/_components/ollama-settings-dialog.tsx | 9 +++++++-- .../app/settings/_components/openai-settings-dialog.tsx | 9 +++++++-- .../app/settings/_components/openai-settings-form.tsx | 1 + .../app/settings/_components/watsonx-settings-dialog.tsx | 9 +++++++-- .../app/settings/_components/watsonx-settings-form.tsx | 1 + 7 files changed, 31 insertions(+), 8 deletions(-) diff --git a/frontend/app/settings/_components/anthropic-settings-dialog.tsx b/frontend/app/settings/_components/anthropic-settings-dialog.tsx index 55bc4d9a2..f1b7e1542 100644 --- a/frontend/app/settings/_components/anthropic-settings-dialog.tsx +++ b/frontend/app/settings/_components/anthropic-settings-dialog.tsx @@ -1,7 +1,7 @@ import { useQueryClient } from "@tanstack/react-query"; import { AnimatePresence, motion } from "motion/react"; import { useRouter } from "next/navigation"; -import { useState } from "react"; +import { useEffect, useState } from "react"; import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; @@ -63,6 +63,11 @@ const AnthropicSettingsDialog = ({ }, }); + useEffect(() => { + // Reset form state on dialog open + if (open) methods.reset(); + }, [open]); + const { handleSubmit, watch } = methods; const apiKey = watch("apiKey"); @@ -204,7 +209,7 @@ const AnthropicSettingsDialog = ({ removeMutation.mutate({ remove_anthropic_config: true }) } > - {removeMutation.isPending ? "Removing..." : "Confirm Remove"} + {removeMutation.isPending ? "Removing..." : "Remove"} ) : ( diff --git a/frontend/app/settings/_components/anthropic-settings-form.tsx b/frontend/app/settings/_components/anthropic-settings-form.tsx index 60bdd3468..14d09e702 100644 --- a/frontend/app/settings/_components/anthropic-settings-form.tsx +++ b/frontend/app/settings/_components/anthropic-settings-form.tsx @@ -38,6 +38,7 @@ export function AnthropicSettingsForm({ className={apiKeyError ? "!border-destructive" : ""} id="api-key" type="password" + autoComplete="new-password" placeholder="sk-ant-..." /> diff --git a/frontend/app/settings/_components/ollama-settings-dialog.tsx b/frontend/app/settings/_components/ollama-settings-dialog.tsx index d079a0a9a..0437d796f 100644 --- a/frontend/app/settings/_components/ollama-settings-dialog.tsx +++ b/frontend/app/settings/_components/ollama-settings-dialog.tsx @@ -1,7 +1,7 @@ import { useQueryClient } from "@tanstack/react-query"; import { AnimatePresence, motion } from "motion/react"; import { useRouter } from "next/navigation"; -import { useState } from "react"; +import { useEffect, useState } from "react"; import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; @@ -65,6 +65,11 @@ const OllamaSettingsDialog = ({ }, }); + useEffect(() => { + // Reset form state on dialog open + if (open) methods.reset(); + }, [open]); + const { handleSubmit, watch } = methods; const endpoint = watch("endpoint"); @@ -197,7 +202,7 @@ const OllamaSettingsDialog = ({ removeMutation.mutate({ remove_ollama_config: true }) } > - {removeMutation.isPending ? "Removing..." : "Confirm Remove"} + {removeMutation.isPending ? "Removing..." 
: "Remove"} ) : ( diff --git a/frontend/app/settings/_components/openai-settings-dialog.tsx b/frontend/app/settings/_components/openai-settings-dialog.tsx index 5da4225d3..b0930e9d9 100644 --- a/frontend/app/settings/_components/openai-settings-dialog.tsx +++ b/frontend/app/settings/_components/openai-settings-dialog.tsx @@ -1,7 +1,7 @@ import { useQueryClient } from "@tanstack/react-query"; import { AnimatePresence, motion } from "motion/react"; import { useRouter } from "next/navigation"; -import { useState } from "react"; +import { useEffect, useState } from "react"; import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; @@ -62,6 +62,11 @@ const OpenAISettingsDialog = ({ }, }); + useEffect(() => { + // Reset form state on dialog open + if (open) methods.reset(); + }, [open]); + const { handleSubmit, watch } = methods; const apiKey = watch("apiKey"); @@ -204,7 +209,7 @@ const OpenAISettingsDialog = ({ removeMutation.mutate({ remove_openai_config: true }) } > - {removeMutation.isPending ? "Removing..." : "Confirm Remove"} + {removeMutation.isPending ? "Removing..." : "Remove"} ) : ( diff --git a/frontend/app/settings/_components/openai-settings-form.tsx b/frontend/app/settings/_components/openai-settings-form.tsx index acdc4bd3a..b47ecac98 100644 --- a/frontend/app/settings/_components/openai-settings-form.tsx +++ b/frontend/app/settings/_components/openai-settings-form.tsx @@ -38,6 +38,7 @@ export function OpenAISettingsForm({ className={apiKeyError ? "!border-destructive" : ""} id="api-key" type="password" + autoComplete="new-password" placeholder="sk-..." /> diff --git a/frontend/app/settings/_components/watsonx-settings-dialog.tsx b/frontend/app/settings/_components/watsonx-settings-dialog.tsx index 2c0e7d557..6c87e3385 100644 --- a/frontend/app/settings/_components/watsonx-settings-dialog.tsx +++ b/frontend/app/settings/_components/watsonx-settings-dialog.tsx @@ -1,7 +1,7 @@ import { useQueryClient } from "@tanstack/react-query"; import { AnimatePresence, motion } from "motion/react"; import { useRouter } from "next/navigation"; -import { useState } from "react"; +import { useEffect, useState } from "react"; import { FormProvider, useForm } from "react-hook-form"; import { toast } from "sonner"; import { useUpdateSettingsMutation } from "@/app/api/mutations/useUpdateSettingsMutation"; @@ -64,6 +64,11 @@ const WatsonxSettingsDialog = ({ }, }); + useEffect(() => { + // Reset form state on dialog open + if (open) methods.reset(); + }, [open]); + const { handleSubmit, watch } = methods; const endpoint = watch("endpoint"); const apiKey = watch("apiKey"); @@ -213,7 +218,7 @@ const WatsonxSettingsDialog = ({ removeMutation.mutate({ remove_watsonx_config: true }) } > - {removeMutation.isPending ? "Removing..." : "Confirm Remove"} + {removeMutation.isPending ? "Removing..." 
: "Remove"} ) : ( diff --git a/frontend/app/settings/_components/watsonx-settings-form.tsx b/frontend/app/settings/_components/watsonx-settings-form.tsx index d07d2393c..a31004c07 100644 --- a/frontend/app/settings/_components/watsonx-settings-form.tsx +++ b/frontend/app/settings/_components/watsonx-settings-form.tsx @@ -122,6 +122,7 @@ export function WatsonxSettingsForm({ } id="api-key" type="password" + autoComplete="new-password" placeholder="your-api-key" /> From a578776454d81b76aaa0858a7256d4c219ce6ca7 Mon Sep 17 00:00:00 2001 From: Mike Pawlowski Date: Tue, 10 Mar 2026 07:32:56 -0700 Subject: [PATCH 40/43] feat: Settings UX should let you remove a model provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Issues: - #833 Summary - Extracted duplicated dialog footer logic into a shared ModelProviderDialogFooter component across all four provider settings dialogs. Refactor: Shared Dialog Footer Component - Created model-provider-dialog-footer.tsx — a new reusable ModelProviderDialogFooter component that encapsulates both the remove-confirmation footer state and the standard save/cancel footer with optional "Remove" button - Defined a typed ModelProviderDialogFooterProps interface covering all props needed to drive both footer states (pending flags, configured/canRemove guards, tooltip text, and all action callbacks) Refactored: Provider Settings Dialogs - Replaced the duplicated inline DialogFooter JSX (including Tooltip/TooltipProvider wrappers) in anthropic-settings-dialog.tsx, ollama-settings-dialog.tsx, openai-settings-dialog.tsx, and watsonx-settings-dialog.tsx with a single usage each - Removed per-dialog imports of DialogFooter, Tooltip, TooltipContent, TooltipProvider, and TooltipTrigger that were no longer needed after extraction --- .../_components/anthropic-settings-dialog.tsx | 92 ++++-------------- .../model-provider-dialog-footer.tsx | 94 +++++++++++++++++++ .../_components/ollama-settings-dialog.tsx | 92 ++++-------------- .../_components/openai-settings-dialog.tsx | 92 ++++-------------- .../_components/watsonx-settings-dialog.tsx | 92 ++++-------------- 5 files changed, 158 insertions(+), 304 deletions(-) create mode 100644 frontend/app/settings/_components/model-provider-dialog-footer.tsx diff --git a/frontend/app/settings/_components/anthropic-settings-dialog.tsx b/frontend/app/settings/_components/anthropic-settings-dialog.tsx index f1b7e1542..4652a26e6 100644 --- a/frontend/app/settings/_components/anthropic-settings-dialog.tsx +++ b/frontend/app/settings/_components/anthropic-settings-dialog.tsx @@ -13,21 +13,15 @@ import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, - DialogFooter, DialogHeader, DialogTitle, } from "@/components/ui/dialog"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/ui/tooltip"; import { useAuth } from "@/contexts/auth-context"; import { AnthropicSettingsForm, type AnthropicSettingsFormData, } from "./anthropic-settings-form"; +import ModelProviderDialogFooter from "./model-provider-dialog-footer"; const AnthropicSettingsDialog = ({ open, @@ -189,75 +183,21 @@ const AnthropicSettingsDialog = ({ )} - {showRemoveConfirm ? ( - -
- Remove configuration? -
- - -
- ) : ( - - {isAnthropicConfigured && ( - - - - - - - - {!canRemoveAnthropic && ( - - Configure another model provider before removing - Anthropic - - )} - - - )} - - - - )} + setShowRemoveConfirm(false)} + onConfirmRemove={() => + removeMutation.mutate({ remove_anthropic_config: true }) + } + isRemovePending={removeMutation.isPending} + isConfigured={isAnthropicConfigured} + canRemove={canRemoveAnthropic} + removeDisabledTooltip="Configure another model provider before removing Anthropic" + onRequestRemove={() => setShowRemoveConfirm(true)} + onCancel={() => setOpen(false)} + isSavePending={settingsMutation.isPending} + isValidating={isValidating} + /> diff --git a/frontend/app/settings/_components/model-provider-dialog-footer.tsx b/frontend/app/settings/_components/model-provider-dialog-footer.tsx new file mode 100644 index 000000000..1e891ca04 --- /dev/null +++ b/frontend/app/settings/_components/model-provider-dialog-footer.tsx @@ -0,0 +1,94 @@ +import { Button } from "@/components/ui/button"; +import { DialogFooter } from "@/components/ui/dialog"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/ui/tooltip"; + +type ModelProviderDialogFooterProps = { + showRemoveConfirm: boolean; + onCancelRemove: () => void; + onConfirmRemove: () => void; + isRemovePending: boolean; + + isConfigured: boolean; + canRemove: boolean; + removeDisabledTooltip: string; + onRequestRemove: () => void; + + onCancel: () => void; + isSavePending: boolean; + isValidating: boolean; +}; + +const ModelProviderDialogFooter = ({ + showRemoveConfirm, + onCancelRemove, + onConfirmRemove, + isRemovePending, + isConfigured, + canRemove, + removeDisabledTooltip, + onRequestRemove, + onCancel, + isSavePending, + isValidating, +}: ModelProviderDialogFooterProps) => { + if (showRemoveConfirm) { + return ( + +
+ Remove configuration? +
+ + +
+ ); + } + + return ( + + {isConfigured && ( + + + + + + + + {!canRemove && ( + {removeDisabledTooltip} + )} + + + )} + + + + ); +}; + +export default ModelProviderDialogFooter; diff --git a/frontend/app/settings/_components/ollama-settings-dialog.tsx b/frontend/app/settings/_components/ollama-settings-dialog.tsx index 0437d796f..b60665bf3 100644 --- a/frontend/app/settings/_components/ollama-settings-dialog.tsx +++ b/frontend/app/settings/_components/ollama-settings-dialog.tsx @@ -13,21 +13,15 @@ import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, - DialogFooter, DialogHeader, DialogTitle, } from "@/components/ui/dialog"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/ui/tooltip"; import { useAuth } from "@/contexts/auth-context"; import { OllamaSettingsForm, type OllamaSettingsFormData, } from "./ollama-settings-form"; +import ModelProviderDialogFooter from "./model-provider-dialog-footer"; const OllamaSettingsDialog = ({ open, @@ -182,75 +176,21 @@ const OllamaSettingsDialog = ({ )} - {showRemoveConfirm ? ( - -
- Remove configuration? -
- - -
- ) : ( - - {isOllamaConfigured && ( - - - - - - - - {!canRemoveOllama && ( - - Configure another model provider before removing - Ollama - - )} - - - )} - - - - )} + setShowRemoveConfirm(false)} + onConfirmRemove={() => + removeMutation.mutate({ remove_ollama_config: true }) + } + isRemovePending={removeMutation.isPending} + isConfigured={isOllamaConfigured} + canRemove={canRemoveOllama} + removeDisabledTooltip="Configure another model provider before removing Ollama" + onRequestRemove={() => setShowRemoveConfirm(true)} + onCancel={() => setOpen(false)} + isSavePending={settingsMutation.isPending} + isValidating={isValidating} + /> diff --git a/frontend/app/settings/_components/openai-settings-dialog.tsx b/frontend/app/settings/_components/openai-settings-dialog.tsx index b0930e9d9..0eb08f9e4 100644 --- a/frontend/app/settings/_components/openai-settings-dialog.tsx +++ b/frontend/app/settings/_components/openai-settings-dialog.tsx @@ -13,21 +13,15 @@ import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, - DialogFooter, DialogHeader, DialogTitle, } from "@/components/ui/dialog"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/ui/tooltip"; import { useAuth } from "@/contexts/auth-context"; import { OpenAISettingsForm, type OpenAISettingsFormData, } from "./openai-settings-form"; +import ModelProviderDialogFooter from "./model-provider-dialog-footer"; const OpenAISettingsDialog = ({ open, @@ -189,75 +183,21 @@ const OpenAISettingsDialog = ({ )} - {showRemoveConfirm ? ( - -
- Remove configuration? -
- - -
- ) : ( - - {isOpenAIConfigured && ( - - - - - - - - {!canRemoveOpenAI && ( - - Configure another model provider before removing - OpenAI - - )} - - - )} - - - - )} + setShowRemoveConfirm(false)} + onConfirmRemove={() => + removeMutation.mutate({ remove_openai_config: true }) + } + isRemovePending={removeMutation.isPending} + isConfigured={isOpenAIConfigured} + canRemove={canRemoveOpenAI} + removeDisabledTooltip="Configure another model provider before removing OpenAI" + onRequestRemove={() => setShowRemoveConfirm(true)} + onCancel={() => setOpen(false)} + isSavePending={settingsMutation.isPending} + isValidating={isValidating} + /> diff --git a/frontend/app/settings/_components/watsonx-settings-dialog.tsx b/frontend/app/settings/_components/watsonx-settings-dialog.tsx index 6c87e3385..d6b1d9334 100644 --- a/frontend/app/settings/_components/watsonx-settings-dialog.tsx +++ b/frontend/app/settings/_components/watsonx-settings-dialog.tsx @@ -13,21 +13,15 @@ import { Button } from "@/components/ui/button"; import { Dialog, DialogContent, - DialogFooter, DialogHeader, DialogTitle, } from "@/components/ui/dialog"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/ui/tooltip"; import { useAuth } from "@/contexts/auth-context"; import { WatsonxSettingsForm, type WatsonxSettingsFormData, } from "./watsonx-settings-form"; +import ModelProviderDialogFooter from "./model-provider-dialog-footer"; const WatsonxSettingsDialog = ({ open, @@ -198,75 +192,21 @@ const WatsonxSettingsDialog = ({ )} - {showRemoveConfirm ? ( - -
- Remove configuration? -
- - -
- ) : ( - - {isWatsonxConfigured && ( - - - - - - - - {!canRemoveWatsonx && ( - - Configure another model provider before removing IBM - watsonx.ai - - )} - - - )} - - - - )} + setShowRemoveConfirm(false)} + onConfirmRemove={() => + removeMutation.mutate({ remove_watsonx_config: true }) + } + isRemovePending={removeMutation.isPending} + isConfigured={isWatsonxConfigured} + canRemove={canRemoveWatsonx} + removeDisabledTooltip="Configure another model provider before removing IBM watsonx.ai" + onRequestRemove={() => setShowRemoveConfirm(true)} + onCancel={() => setOpen(false)} + isSavePending={settingsMutation.isPending} + isValidating={isValidating} + /> From 3f02bb7ccad7a2d034b77948810f399e570d0351 Mon Sep 17 00:00:00 2001 From: Lucas Oliveira <62335616+lucaseduoli@users.noreply.github.com> Date: Tue, 10 Mar 2026 12:42:47 -0300 Subject: [PATCH 41/43] fix: use correct error handling for chat messages and ingestion (#859) * Show error messages on the frontend * Handle errors on backend and pass them to the frontend * Update frontend/hooks/useChatStreaming.ts Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/services/langflow_file_service.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update frontend/app/chat/page.tsx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../app/chat/_components/error-message.tsx | 51 ++++++++ frontend/app/chat/_types/types.ts | 1 + frontend/app/chat/page.tsx | 47 ++++--- frontend/hooks/useChatStreaming.ts | 43 ++++-- src/agent.py | 123 +++++++++++------- src/services/chat_service.py | 3 + src/services/langflow_file_service.py | 30 ++++- src/services/langflow_history_service.py | 12 +- 8 files changed, 230 insertions(+), 80 deletions(-) create mode 100644 frontend/app/chat/_components/error-message.tsx diff --git a/frontend/app/chat/_components/error-message.tsx b/frontend/app/chat/_components/error-message.tsx new file mode 100644 index 000000000..329f4e266 --- /dev/null +++ b/frontend/app/chat/_components/error-message.tsx @@ -0,0 +1,51 @@ +import { AlertCircle } from "lucide-react"; +import { motion } from "motion/react"; +import DogIcon from "@/components/icons/dog-icon"; +import { Message } from "./message"; + +interface ErrorMessageProps { + content: string; + animate?: boolean; + delay?: number; +} + +export function ErrorMessage({ + content, + animate = true, + delay = 0.2, +}: ErrorMessageProps) { + return ( + + + + + } + > +
+
+ +
+

Error

+

{content}

+
+
+
+
+
+ ); +} + +// Made with Bob diff --git a/frontend/app/chat/_types/types.ts b/frontend/app/chat/_types/types.ts index c605da7c3..7787c8d11 100644 --- a/frontend/app/chat/_types/types.ts +++ b/frontend/app/chat/_types/types.ts @@ -17,6 +17,7 @@ export interface Message { functionCalls?: FunctionCall[]; isStreaming?: boolean; source?: "langflow" | "chat"; + error?: boolean; usage?: TokenUsage; } diff --git a/frontend/app/chat/page.tsx b/frontend/app/chat/page.tsx index fe0d660e9..57317b08c 100644 --- a/frontend/app/chat/page.tsx +++ b/frontend/app/chat/page.tsx @@ -15,6 +15,7 @@ import { useGetNudgesQuery } from "../api/queries/useGetNudgesQuery"; import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery"; import { AssistantMessage } from "./_components/assistant-message"; import { ChatInput, type ChatInputHandle } from "./_components/chat-input"; +import { ErrorMessage } from "./_components/error-message"; import Nudges from "./_components/nudges"; import { UserMessage } from "./_components/user-message"; import type { @@ -283,6 +284,7 @@ function ChatPage() { role: "assistant", content: `❌ Failed to process document. Please try again.`, timestamp: new Date(), + error: true, }; setMessages((prev) => [...prev.slice(0, -1), errorMessage]); } finally { @@ -367,6 +369,7 @@ function ChatPage() { content: string; timestamp?: string; response_id?: string; + error?: boolean; chunks?: Array<{ item?: { type?: string; @@ -394,6 +397,7 @@ function ChatPage() { role: msg.role as "user" | "assistant", content: msg.content, timestamp: new Date(msg.timestamp || new Date()), + error: msg.error || false, }; // Extract function calls from chunks or response_data @@ -905,6 +909,7 @@ function ChatPage() { role: "assistant", content: "Sorry, I encountered an error. Please try again.", timestamp: new Date(), + error: true, }; setMessages((prev) => [...prev, errorMessage]); } @@ -917,6 +922,7 @@ function ChatPage() { content: "Sorry, I couldn't connect to the chat service. Please try again.", timestamp: new Date(), + error: true, }; setMessages((prev) => [...prev, errorMessage]); } @@ -1149,23 +1155,30 @@ function ChatPage() { }-${index}-${message.timestamp?.getTime()}`} className="space-y-6 group" > - handleForkConversation(index, e)} - animate={false} - isInactive={index < messages.length - 1} - isInitialGreeting={ - index === 0 && - messages.length === 1 && - message.content === "How can I assist?" - } - usage={message.usage} - /> + {message.error ? ( + + ) : ( + handleForkConversation(index, e)} + animate={false} + isInactive={index < messages.length - 1} + isInitialGreeting={ + index === 0 && + messages.length === 1 && + message.content === "How can I assist?" 
+ } + usage={message.usage} + /> + )} ), )} diff --git a/frontend/hooks/useChatStreaming.ts b/frontend/hooks/useChatStreaming.ts index 13144e910..eee28360c 100644 --- a/frontend/hooks/useChatStreaming.ts +++ b/frontend/hooks/useChatStreaming.ts @@ -134,6 +134,7 @@ export function useChatStreaming({ let currentContent = ""; const currentFunctionCalls: FunctionCall[] = []; let newResponseId: string | null = null; + let isError = false; let usageData: TokenUsage | undefined; // Initialize streaming message @@ -454,6 +455,17 @@ export function useChatStreaming({ currentFunctionCalls.push(newFunctionCall); } } + + // Check for error status from Langflow + if (chunk.finish_reason === "error" || chunk.status === "failed") { + console.error("Error detected in stream"); + + // Mark this as an error message and complete the stream + isError = true; + + // Exit the streaming loop by throwing so the reader stops promptly on error + throw new Error("Error detected in stream"); + } // Handle text output streaming (Realtime API) else if (chunk.type === "response.output_text.delta") { currentContent += chunk.delta || ""; @@ -468,15 +480,15 @@ export function useChatStreaming({ // Handle OpenRAG backend format else if (chunk.output_text) { currentContent += chunk.output_text; - } else if (chunk.delta) { + } + // Note: chunk.delta.content is already handled in the OpenAI format section above (line 271) + // Only handle delta if it's a string or has text (not content) + else if (chunk.delta) { if (typeof chunk.delta === "string") { currentContent += chunk.delta; - } else if (typeof chunk.delta === "object") { - if (chunk.delta.content) { - currentContent += chunk.delta.content; - } else if (chunk.delta.text) { - currentContent += chunk.delta.text; - } + } else if (typeof chunk.delta === "object" && chunk.delta.text && !chunk.delta.content) { + // Only add text if content wasn't already processed + currentContent += chunk.delta.text; } } @@ -605,6 +617,7 @@ export function useChatStreaming({ currentFunctionCalls.length > 0 ? currentFunctionCalls : undefined, timestamp: new Date(), isStreaming: false, + error: isError, // Mark as error if Langflow returned error status usage: usageData, }; @@ -633,10 +646,10 @@ export function useChatStreaming({ setStreamingMessage(null); // Create user-friendly error message - let errorContent = - "Sorry, I couldn't connect to the chat service. Please try again."; - const errorMessage = (error as Error).message; + let errorContent = errorMessage; // Default to the actual error message + + // Only override with generic messages for specific infrastructure errors if (errorMessage?.includes("timed out")) { errorContent = "The request timed out. The server took too long to respond. Please try again."; @@ -648,9 +661,8 @@ export function useChatStreaming({ ) { errorContent = "Network error. 
Please check your connection and try again."; - } else if (errorMessage?.includes("Server error")) { - errorContent = errorMessage; // Use the detailed server error message } + // For all other errors (including Langflow errors), use the actual error message onError?.(error as Error); @@ -659,8 +671,15 @@ export function useChatStreaming({ content: errorContent, timestamp: new Date(), isStreaming: false, + error: true, }; + // Pass error message to onComplete so it gets added to chat history + // This ensures errors appear immediately and persist on page refresh + if (!streamAbortRef.current?.signal.aborted) { + onComplete?.(errorMessageObj, null); + } + return errorMessageObj; } finally { if (timeoutId) clearTimeout(timeoutId); diff --git a/src/agent.py b/src/agent.py index 76cf8b1d6..1fa2f4b76 100644 --- a/src/agent.py +++ b/src/agent.py @@ -135,14 +135,13 @@ async def async_response_stream( full_response = "" chunk_count = 0 - detected_tool_call = False # Track if we've detected a tool call + detected_tool_call = False async for chunk in response: chunk_count += 1 logger.debug( "Stream chunk received", chunk_count=chunk_count, chunk=str(chunk) ) - # Yield the raw event as JSON for the UI to process import json # Also extract text content for logging @@ -186,7 +185,7 @@ async def async_response_stream( if isinstance(chunk_data, dict): # Check for any fields that might indicate tool usage potential_tool_fields = { - k: v for k, v in chunk_data.items() + k: v for k, v in chunk_data.items() if any(keyword in str(k).lower() for keyword in ['tool', 'call', 'retrieval', 'function', 'result', 'output']) } if potential_tool_fields: @@ -235,8 +234,8 @@ async def async_response_stream( "tool_name": "Retrieval", "status": "completed", "inputs": {"implicit": True, "backend_detected": True}, - "results": chunk_data.get('results') or chunk_data.get('outputs') or - chunk_data.get('retrieved_documents') or + "results": chunk_data.get('results') or chunk_data.get('outputs') or + chunk_data.get('retrieved_documents') or chunk_data.get('retrieval_results') or [] } } @@ -698,47 +697,55 @@ async def async_langflow_chat_stream( response_id = None usage_data = None collected_chunks = [] # Store all chunks for function call data + error_occurred = False - async for chunk in async_stream( - langflow_client, - prompt, - flow_id, - extra_headers=extra_headers, - previous_response_id=previous_response_id, - log_prefix="langflow", - ): - # Extract text content to build full response for history - try: - import json - - chunk_data = json.loads(chunk.decode("utf-8")) - collected_chunks.append(chunk_data) # Collect all chunk data - - if "delta" in chunk_data and "content" in chunk_data["delta"]: - full_response += chunk_data["delta"]["content"] - # Extract response_id from chunk - if "id" in chunk_data: - response_id = chunk_data["id"] - elif "response_id" in chunk_data: - response_id = chunk_data["response_id"] - # Capture usage from response.completed event - if chunk_data.get("type") == "response.completed": - response_obj = chunk_data.get("response", {}) - usage_data = response_obj.get("usage") - except: - pass - yield chunk + try: + async for chunk in async_stream( + langflow_client, + prompt, + flow_id, + extra_headers=extra_headers, + previous_response_id=previous_response_id, + log_prefix="langflow", + ): + # Extract text content to build full response for history + try: + import json + + chunk_data = json.loads(chunk.decode("utf-8")) + collected_chunks.append(chunk_data) # Collect all chunk data + + if "delta" 
in chunk_data and "content" in chunk_data["delta"]: + full_response += chunk_data["delta"]["content"] + # Extract response_id from chunk + if "id" in chunk_data: + response_id = chunk_data["id"] + elif "response_id" in chunk_data: + response_id = chunk_data["response_id"] + + # Check for error status + if chunk_data.get("finish_reason") == "error" or chunk_data.get("status") == "failed": + error_occurred = True + logger.error("Error detected in Langflow stream chunk") + # Capture usage from response.completed event + if chunk_data.get("type") == "response.completed": + response_obj = chunk_data.get("response", {}) + usage_data = response_obj.get("usage") + except: + pass + yield chunk - # Add the complete assistant response to message history with response_id, timestamp, and function call data - if full_response: - assistant_message = { - "role": "assistant", - "content": full_response, - "response_id": response_id, - "timestamp": datetime.now(), - "chunks": collected_chunks, # Store complete chunk data for function calls - } - # Store usage data if available (from response.completed event) + # Add the complete assistant response to message history with response_id, timestamp, and function call data + if full_response: + assistant_message = { + "role": "assistant", + "content": full_response, + "response_id": response_id, + "timestamp": datetime.now(), + "chunks": collected_chunks, # Store complete chunk data for function calls + "error": error_occurred, # Mark if this was an error response + } + # Store usage data if available (from response.completed event) if usage_data: assistant_message["response_data"] = {"usage": usage_data} conversation_state["messages"].append(assistant_message) @@ -760,6 +767,34 @@ async def async_langflow_chat_stream( logger.debug( f"Stored langflow conversation thread for user {user_id} with response_id: {response_id}" ) + except Exception as e: + # Log the error + logger.error(f"Error in langflow chat stream: {e}", exc_info=True) + error_occurred = True + + # Store error message in conversation history so it persists + error_message = { + "role": "assistant", + "content": f"Sorry, I encountered an error: {str(e)}", + "timestamp": datetime.now(), + "error": True, + } + conversation_state["messages"].append(error_message) + + # Try to store the conversation with error message + # Use a temporary response_id if we don't have one + if not response_id: + response_id = f"error_{user_id}_{int(datetime.now().timestamp())}" + + try: + conversation_state["last_activity"] = datetime.now() + await store_conversation_thread(user_id, response_id, conversation_state) + logger.debug(f"Stored conversation with error for user {user_id}") + except Exception as store_error: + logger.error(f"Failed to store error conversation: {store_error}") + + # Re-raise the exception so it propagates to the API layer + raise async def delete_user_conversation(user_id: str, response_id: str) -> bool: diff --git a/src/services/chat_service.py b/src/services/chat_service.py index 92c834a84..55d4a98bd 100644 --- a/src/services/chat_service.py +++ b/src/services/chat_service.py @@ -516,6 +516,9 @@ async def get_langflow_history(self, user_id: str): "source": "langflow", } + if msg.get("error"): + message_data["error"] = True + # Include function call data if present if msg.get("chunks"): message_data["chunks"] = msg["chunks"] diff --git a/src/services/langflow_file_service.py b/src/services/langflow_file_service.py index c1732489b..a569b4d9f 100644 --- a/src/services/langflow_file_service.py +++ 
b/src/services/langflow_file_service.py @@ -1,5 +1,5 @@ -from typing import Any, Dict, List, Optional import json +from typing import Any, Dict, List, Optional from config.settings import LANGFLOW_INGEST_FLOW_ID, clients from utils.logging_config import get_logger @@ -62,7 +62,7 @@ async def run_ingestion_flow( self, file_paths: List[str], file_tuples: list[tuple[str, str, str]], - jwt_token: str, + jwt_token: Optional[str] = None, session_id: Optional[str] = None, tweaks: Optional[Dict[str, Any]] = None, owner: Optional[str] = None, @@ -180,7 +180,28 @@ async def run_ingestion_flow( reason=resp.reason_phrase, body=resp.text[:1000], ) - resp.raise_for_status() + + # Extract error message from Langflow response + error_message = f"Server error '{resp.status_code} {resp.reason_phrase}'" + try: + error_data = resp.json() + if isinstance(error_data, dict) and "detail" in error_data: + detail = error_data["detail"] + if isinstance(detail, str): + try: + detail_obj = json.loads(detail) + if isinstance(detail_obj, dict) and "message" in detail_obj: + error_message = detail_obj["message"] + else: + error_message = detail + except json.JSONDecodeError: + error_message = detail + elif isinstance(detail, dict) and "message" in detail: + error_message = detail["message"] + except Exception: + pass + + raise Exception(error_message) # Check if response is actually JSON before parsing content_type = resp.headers.get("content-type", "") @@ -319,8 +340,7 @@ async def upload_and_ingest_file( "[LF] Ingestion failed during combined operation", extra={"error": str(e), "file_path": file_path}, ) - # Note: We could optionally delete the uploaded file here if ingestion fails - raise Exception(f"Ingestion failed: {str(e)}") + raise # Step 4: Delete file from Langflow (optional) file_id = upload_result.get("id") diff --git a/src/services/langflow_history_service.py b/src/services/langflow_history_service.py index c9a77cfa7..dc4111486 100644 --- a/src/services/langflow_history_service.py +++ b/src/services/langflow_history_service.py @@ -71,19 +71,27 @@ def _convert_langflow_messages(self, langflow_messages: List[Dict[str, Any]]) -> for msg in langflow_messages: try: + content = msg.get("text", "") + + # Detect error messages - check explicit error flag or common error patterns + is_error = msg.get("error", False) or ( + msg.get("sender") != "User" and content.startswith("Error:") + ) + # Map Langflow message format to OpenRAG format converted_msg = { "role": "user" if msg.get("sender") == "User" else "assistant", - "content": msg.get("text", ""), + "content": content, "timestamp": msg.get("timestamp"), "langflow_message_id": msg.get("id"), "langflow_session_id": msg.get("session_id"), "langflow_flow_id": msg.get("flow_id"), "sender": msg.get("sender"), "sender_name": msg.get("sender_name"), + "source": "langflow", "files": msg.get("files", []), "properties": msg.get("properties", {}), - "error": msg.get("error", False), + "error": is_error, "edit": msg.get("edit", False) } From 685ecd31a5c97a3c77f8193b3b7fdc425f9b80fe Mon Sep 17 00:00:00 2001 From: Lucas Oliveira <62335616+lucaseduoli@users.noreply.github.com> Date: Tue, 10 Mar 2026 16:40:47 -0300 Subject: [PATCH 42/43] fix: pass effective jwt token to API calls (#1088) * pass effective JWT token to API calls * Added comment * added user id to effective jwt token * added better get effective jwt token * add integration tests to test tool calling * handle case where user isn't already in the users dict * fix sources * improved test --------- Co-authored-by: phact 
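
Reviewer note: the diff below makes the v1 chat stream emit dedicated "sources" events alongside the existing "content" deltas, so clients can render citations as retrieval tool calls complete. A minimal consumer sketch for review purposes; the event shapes match what _transform_stream_to_sse yields below, but the route (/api/v1/chat), the x-api-key header name, and the message/stream body fields are assumptions not pinned down in this patch.

import json
import httpx

async def read_chat_stream(message: str, api_key: str) -> list[dict]:
    # Stream a v1 chat response, printing content deltas and collecting
    # the sources events added by this patch.
    sources: list[dict] = []
    async with httpx.AsyncClient(timeout=60.0) as client:
        async with client.stream(
            "POST",
            "http://localhost:8000/api/v1/chat",        # assumed route
            headers={"x-api-key": api_key},             # assumed header name
            json={"message": message, "stream": True},  # body fields assumed
        ) as resp:
            async for line in resp.aiter_lines():
                if not line.startswith("data: "):
                    continue
                event = json.loads(line[len("data: "):])
                if event.get("type") == "content":
                    print(event["delta"], end="", flush=True)
                elif event.get("type") == "sources":
                    # One event per retrieval tool call; each entry carries
                    # filename, text, score, page, and mimetype.
                    sources.extend(event["sources"])
    return sources
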
--- sdks/python/tests/test_integration.py | 39 +++++++++++++++++++++++++++ src/agent.py | 21 +++++++++++++-- src/api/v1/chat.py | 22 +++++++++++++++ src/dependencies.py | 10 ++++--- src/services/chat_service.py | 6 +++-- src/session_manager.py | 28 +++++++++---------- 6 files changed, 103 insertions(+), 23 deletions(-) diff --git a/sdks/python/tests/test_integration.py b/sdks/python/tests/test_integration.py index 2df299491..df98a034c 100644 --- a/sdks/python/tests/test_integration.py +++ b/sdks/python/tests/test_integration.py @@ -127,6 +127,22 @@ async def test_update_settings(self, client): assert updated_settings.knowledge.chunk_size == current_chunk_size +class TestModels: + """Test models endpoint.""" + + @pytest.mark.asyncio + async def test_list_models(self, client): + """Test listing models for a provider.""" + # This tests both the API key auth and the minted JWT + # since models_service often needs credentials/JWT + models = await client.models.list("openai") + + assert models.language_models is not None + assert isinstance(models.language_models, list) + assert models.embedding_models is not None + assert isinstance(models.embedding_models, list) + + class TestKnowledgeFilters: """Test knowledge filter operations.""" @@ -406,3 +422,26 @@ async def test_delete_conversation(self, client): result = await client.chat.delete(response.chat_id) assert result is True + + @pytest.mark.asyncio + async def test_chat_with_sources(self, client, test_file: Path): + """Test chat uses embedded knowledge (RAG), not just pure LLM.""" + # 1. Ingest document + result = await client.documents.ingest(file_path=str(test_file)) + if result.status == "failed" or result.successful_files == 0: + pytest.skip("Document ingestion failed — cannot test RAG sources") + + # 2. Wait for indexing + import asyncio + await asyncio.sleep(3) + + # 3. Chat about document content + response = await client.chat.create( + message="What is the color of the dancing animals mentioned in my documents?" + ) + + # 4. 
Verify sources — proves RAG retrieval worked + assert response.sources is not None + assert len(response.sources) > 0 + source_filenames = [s.filename for s in response.sources] + assert any(test_file.name in name for name in source_filenames) diff --git a/src/agent.py b/src/agent.py index 1fa2f4b76..172bf23df 100644 --- a/src/agent.py +++ b/src/agent.py @@ -624,8 +624,25 @@ async def async_langflow_chat( message_count=len(conversation_state["messages"]), ) + # Extract sources from retrieval tool calls in the response + sources = [] + if hasattr(response_obj, "output") and response_obj.output: + for output_item in response_obj.output: + item_type = getattr(output_item, "type", None) + if item_type in ("tool_call", "retrieval_call"): + for result in getattr(output_item, "results", None) or []: + rd = result.model_dump() if hasattr(result, "model_dump") else (result if isinstance(result, dict) else {}) + if "text" in rd: + sources.append({ + "filename": rd.get("filename", ""), + "text": rd.get("text", ""), + "score": rd.get("score", 0), + "page": rd.get("page"), + "mimetype": rd.get("mimetype"), + }) + if not store_conversation: - return response_text, response_id + return response_text, response_id, sources # Store the conversation thread with its response_id if response_id: @@ -661,7 +678,7 @@ async def async_langflow_chat( else: logger.warning("No response_id received from langflow, conversation not stored") - return response_text, response_id + return response_text, response_id, sources # Async langflow function with conversation storage (streaming) diff --git a/src/api/v1/chat.py b/src/api/v1/chat.py index ebb191417..b87507afe 100644 --- a/src/api/v1/chat.py +++ b/src/api/v1/chat.py @@ -28,6 +28,21 @@ class ChatV1Body(BaseModel): filter_id: Optional[str] = None +def _extract_sources(item: dict) -> list[dict]: + """Extract sources from a retrieval tool call item.""" + sources = [] + for result in item.get("results", []): + if isinstance(result, dict) and "text" in result: + sources.append({ + "filename": result.get("filename", ""), + "text": result.get("text", ""), + "score": result.get("score", 0), + "page": result.get("page"), + "mimetype": result.get("mimetype"), + }) + return sources + + async def _transform_stream_to_sse(raw_stream, chat_id_container: dict): """Transform raw Langflow streaming format to clean SSE events for v1 API.""" full_text = "" @@ -64,6 +79,13 @@ async def _transform_stream_to_sse(raw_stream, chat_id_container: dict): full_text += delta_text yield f"data: {json.dumps({'type': 'content', 'delta': delta_text})}\n\n" + # Emit sources from retrieval tool calls + item = chunk_data.get("item", {}) + if item.get("type") in ("retrieval_call", "tool_call") and item.get("results"): + sources = _extract_sources(item) + if sources: + yield f"data: {json.dumps({'type': 'sources', 'sources': sources})}\n\n" + if not chat_id: chat_id = chunk_data.get("id") or chunk_data.get("response_id") diff --git a/src/dependencies.py b/src/dependencies.py index 8429fdf24..37bcdbf30 100644 --- a/src/dependencies.py +++ b/src/dependencies.py @@ -162,6 +162,7 @@ def get_optional_user( async def get_api_key_user_async( request: Request, api_key_service=Depends(get_api_key_service), + session_manager=Depends(get_session_manager), ) -> User: """ Async dependency: require API key authentication. 
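
Reviewer note on the hunk that follows: API-key users previously got jwt_token=None, so downstream OpenSearch calls had no credential. The change registers the user with the session manager and resolves an effective token. A self-contained toy sketch of the resolution order for review; the real logic is the rewritten get_effective_jwt_token in src/session_manager.py at the end of this patch, and the anonymous-JWT and signing internals here are stubs, not the actual implementation.

import dataclasses
from typing import Optional

@dataclasses.dataclass
class User:
    user_id: str
    jwt_token: Optional[str] = None

class SessionManagerSketch:
    def __init__(self, no_auth_mode: bool = False):
        self.users: dict[str, User] = {}
        self.no_auth_mode = no_auth_mode          # stand-in for is_no_auth_mode()
        self._anonymous_jwt: Optional[str] = None

    def create_jwt_token(self, user: User) -> str:
        return f"minted-for-{user.user_id}"       # stub; the real code signs a JWT

    def get_effective_jwt_token(
        self, user_id: Optional[str], jwt_token: Optional[str]
    ) -> Optional[str]:
        if jwt_token is not None:                 # 1. an explicit token always wins
            return jwt_token
        if self.no_auth_mode or user_id is None:  # 2. anonymous fallback, cached
            if self._anonymous_jwt is None:
                self._anonymous_jwt = "anon-jwt"  # stub for _create_anonymous_jwt()
            return self._anonymous_jwt
        user = self.users.get(user_id)            # 3. known real user: mint a JWT
        if user:
            return self.create_jwt_token(user)
        return None                               # 4. unknown user, no token

sm = SessionManagerSketch()
sm.users["api-key-user"] = User("api-key-user")
assert sm.get_effective_jwt_token("api-key-user", None) == "minted-for-api-key-user"

The assertion covers the case this patch fixes: a registered API-key user with no incoming token now gets a minted JWT instead of None.
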
@@ -208,9 +209,12 @@ async def get_api_key_user_async( provider="api_key", ) - # API Key users don't typically have a JWT for OpenSearch OIDC, - # but we can try to get an effective one if needed - user_with_token = dataclasses.replace(user, jwt_token=None) + # Register the API key user so get_effective_jwt_token can find them + if user.user_id not in session_manager.users: + session_manager.users[user.user_id] = user + + effective_token = session_manager.get_effective_jwt_token(user.user_id, None) + user_with_token = dataclasses.replace(user, jwt_token=effective_token) request.state.user = user_with_token request.state.api_key_id = user_info["key_id"] diff --git a/src/services/chat_service.py b/src/services/chat_service.py index 55d4a98bd..d2bfa8b8f 100644 --- a/src/services/chat_service.py +++ b/src/services/chat_service.py @@ -156,7 +156,7 @@ async def langflow_chat( else: from agent import async_langflow_chat - response_text, response_id = await async_langflow_chat( + response_text, response_id, sources = await async_langflow_chat( langflow_client, LANGFLOW_CHAT_FLOW_ID, prompt, @@ -168,6 +168,8 @@ async def langflow_chat( response_data = {"response": response_text} if response_id: response_data["response_id"] = response_id + if sources: + response_data["sources"] = sources return response_data async def langflow_nudges_chat( @@ -289,7 +291,7 @@ async def langflow_nudges_chat( from agent import async_langflow_chat - response_text, response_id = await async_langflow_chat( + response_text, response_id, _sources = await async_langflow_chat( langflow_client, NUDGES_FLOW_ID, prompt, diff --git a/src/session_manager.py b/src/session_manager.py index f9dee81aa..2de2d9653 100644 --- a/src/session_manager.py +++ b/src/session_manager.py @@ -264,26 +264,22 @@ def get_effective_jwt_token(self, user_id: str, jwt_token: str) -> str: """Get the effective JWT token, creating anonymous JWT if needed in no-auth mode""" from config.settings import is_no_auth_mode - logger.debug( - "get_effective_jwt_token", - user_id=user_id, - jwt_token_present=(jwt_token is not None), - no_auth_mode=is_no_auth_mode(), - ) + if jwt_token is not None: + return jwt_token - # In no-auth mode, create anonymous JWT if needed - if jwt_token is None and (is_no_auth_mode() or user_id in (None, AnonymousUser().user_id)): + # No token — create one + if is_no_auth_mode() or user_id in (None, AnonymousUser().user_id): + # anonymous JWT (cached) if not hasattr(self, "_anonymous_jwt"): - # Create anonymous JWT token for OpenSearch OIDC - logger.debug("Creating anonymous JWT") self._anonymous_jwt = self._create_anonymous_jwt() - logger.debug( - "Anonymous JWT created", jwt_prefix=self._anonymous_jwt[:50] - ) - jwt_token = self._anonymous_jwt - logger.debug("Using anonymous JWT") + return self._anonymous_jwt - return jwt_token + # Auth mode, real user, no token — mint a JWT for them + user = self.get_user(user_id) + if user: + return self.create_jwt_token(user) + + return None def _create_anonymous_jwt(self) -> str: """Create JWT token for anonymous user in no-auth mode""" From 79332118830fbf4d8bf41e075b919cfcbfdd4a4b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 11 Mar 2026 09:49:29 +0000 Subject: [PATCH 43/43] chore: update uv.lock files after version bump --- sdks/python/uv.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdks/python/uv.lock b/sdks/python/uv.lock index ee4c491ec..33645d8fa 100644 --- a/sdks/python/uv.lock +++ b/sdks/python/uv.lock @@ -121,7 +121,7 @@ wheels = [ [[package]] 
name = "openrag-sdk" -version = "0.1.4" +version = "0.1.5" source = { editable = "." } dependencies = [ { name = "httpx" },