diff --git a/.env.example b/.env.example
index 3bf38a6..755a274 100644
--- a/.env.example
+++ b/.env.example
@@ -2,6 +2,34 @@ HOST_AGENT_CONFIG=https://services.fluxi.com
OPENAI_API_KEY=dasdasdasdasdas
ANTHROPIC_API_KEY=dasdasdasdas
HOST_DEEP_SEEK=https://55.188.190.83:11434/v1
+RAPIDAPI_HOST=https://aliexpress-datahub.p.rapidapi.com
AGENT_RECOMMEND_PRODUCTS_ID=recommend_agent
+AGENT_RECOMMEND_SIMILAR_PRODUCTS_ID=recommend_agent_similar
RAPIDAPI_KEY=dsadasdasdasda
-RAPIDAPI_HOST=https://aliexpress-datahub.p.rapidapi.com
\ No newline at end of file
+S3_UPLOAD_API=http://lambdahost
+
+API_KEY=tu_clave_api_secreta_aqui
+AUTH_SERVICE_URL=https://develop.api.fluxi.com.co/api/v1/users/user-info
+GOOGLE_VISION_API_KEY=dsadadasda
+REPLICATE_API_KEY=dsadadasda
+SCRAPERAPI_KEY=dsadsadsadasdsadas
+URL_SCRAPER_LAMBDA=https://localhost:8000/
+GOOGLE_GEMINI_API_KEY=sadasadasdasd
+ENVIRONMENT=dev
+
+DROPI_HOST=https://test-api.dropi.co
+DROPI_S3_BASE_URL=https://d39ru7awumhhs2.cloudfront.net/
+DROPI_API_KEY=dasdsadadasdas
+
+# Dropi - API Keys por País (opcional, si no se especifica usa DROPI_API_KEY)
+DROPI_API_KEY_CO=your_dropi_api_key_colombia
+DROPI_API_KEY_MX=your_dropi_api_key_mexico
+DROPI_API_KEY_AR=your_dropi_api_key_argentina
+DROPI_API_KEY_CL=your_dropi_api_key_chile
+DROPI_API_KEY_PE=your_dropi_api_key_peru
+DROPI_API_KEY_PY=your_dropi_api_key_paraguay
+
+LANGCHAIN_TRACING_V2=true
+LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
+LANGCHAIN_API_KEY=tu_api_key_aqui
+LANGCHAIN_PROJECT=develop
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 626aa8c..a1e59b0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,6 +34,7 @@ logs/
# Environment variables file
.env
+.venv
# Pinecone-related cache
pinecone.cache
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index da216c2..9174a8e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# Usar una imagen base de Python
-FROM python:3.9-slim
+FROM python:3.10-slim
# Establecer el directorio de trabajo
WORKDIR /app
diff --git a/app/configurations/config.py b/app/configurations/config.py
index 5a09e87..1849c2a 100644
--- a/app/configurations/config.py
+++ b/app/configurations/config.py
@@ -9,7 +9,54 @@
DEEP_SEEK_HOST = os.getenv('HOST_DEEP_SEEK')
AGENT_RECOMMEND_PRODUCTS_ID = os.getenv('AGENT_RECOMMEND_PRODUCTS_ID')
-
+AGENT_RECOMMEND_SIMILAR_PRODUCTS_ID = os.getenv('AGENT_RECOMMEND_SIMILAR_PRODUCTS_ID')
RAPIDAPI_KEY = os.getenv('RAPIDAPI_KEY')
RAPIDAPI_HOST = os.getenv('RAPIDAPI_HOST')
+
+S3_UPLOAD_API = os.getenv('S3_UPLOAD_API')
+
+AGENT_IMAGE_VARIATIONS = "agent_image_variations"
+SCRAPER_AGENT = "scraper_agent"
+SCRAPER_AGENT_DIRECT = "scraper_agent_direct_code"
+
+AUTH_SERVICE_URL: str = os.getenv('AUTH_SERVICE_URL')
+
+GOOGLE_VISION_API_KEY: str = os.getenv('GOOGLE_VISION_API_KEY')
+REPLICATE_API_KEY: str = os.getenv('REPLICATE_API_KEY')
+SCRAPERAPI_KEY: str = os.getenv('SCRAPERAPI_KEY')
+URL_SCRAPER_LAMBDA: str = os.getenv('URL_SCRAPER_LAMBDA')
+
+API_KEY: str = os.getenv('API_KEY')
+GOOGLE_GEMINI_API_KEY: str = os.getenv('GOOGLE_GEMINI_API_KEY')
+
+ENVIRONMENT: str = os.getenv('ENVIRONMENT')
+
+OPENAI_API_KEY: str = os.getenv('OPENAI_API_KEY')
+
+DROPI_S3_BASE_URL: str = os.getenv('DROPI_S3_BASE_URL', 'https://d39ru7awumhhs2.cloudfront.net/')
+DROPI_HOST: str = os.getenv('DROPI_HOST', 'https://test-api.dropi.co')
+DROPI_API_KEY: str = os.getenv('DROPI_API_KEY')
+DROPI_API_KEY_CO: str = os.getenv('DROPI_API_KEY_CO', os.getenv('DROPI_API_KEY'))
+DROPI_API_KEY_MX: str = os.getenv('DROPI_API_KEY_MX', os.getenv('DROPI_API_KEY'))
+DROPI_API_KEY_AR: str = os.getenv('DROPI_API_KEY_AR', os.getenv('DROPI_API_KEY'))
+DROPI_API_KEY_CL: str = os.getenv('DROPI_API_KEY_CL', os.getenv('DROPI_API_KEY'))
+DROPI_API_KEY_PE: str = os.getenv('DROPI_API_KEY_PE', os.getenv('DROPI_API_KEY'))
+DROPI_API_KEY_PY: str = os.getenv('DROPI_API_KEY_PY', os.getenv('DROPI_API_KEY'))
+DROPI_API_KEY_EC: str = os.getenv('DROPI_API_KEY_EC', os.getenv('DROPI_API_KEY'))
+
+
+def get_dropi_api_key(country: str = "co") -> str:
+ country_keys = {
+ "co": DROPI_API_KEY_CO,
+ "mx": DROPI_API_KEY_MX,
+ "ar": DROPI_API_KEY_AR,
+ "cl": DROPI_API_KEY_CL,
+ "pe": DROPI_API_KEY_PE,
+ "py": DROPI_API_KEY_PY,
+ "ec": DROPI_API_KEY_EC,
+ }
+ return country_keys.get(country.lower(), DROPI_API_KEY)
+
+
+FAL_AI_API_KEY: str = os.getenv('FAL_AI_API_KEY')
diff --git a/app/configurations/copies_config.py b/app/configurations/copies_config.py
new file mode 100644
index 0000000..7c94b76
--- /dev/null
+++ b/app/configurations/copies_config.py
@@ -0,0 +1,8 @@
+AGENT_COPIES = [
+ "agent_prompt_copies_use_cases_v1",
+ "agent_prompt_copies_pain_points_v1",
+ "agent_prompt_copies_benefits_v1",
+ "agent_prompt_copies_features_v1",
+ "agent_prompt_copies_testimonials_v1",
+ "agent_prompt_copies_faqs_v1"
+]
\ No newline at end of file
diff --git a/app/configurations/pdf_manual_config.py b/app/configurations/pdf_manual_config.py
new file mode 100644
index 0000000..42480f7
--- /dev/null
+++ b/app/configurations/pdf_manual_config.py
@@ -0,0 +1,37 @@
+PDF_MANUAL_SECTIONS_TRANSLATIONS = {
+ "es": {
+ "introduction": "Introducción",
+ "main_features": "Características principales",
+ "usage_instructions": "Instrucciones de uso",
+ "troubleshooting": "Solución de problemas",
+ "faq": "Preguntas frecuentes"
+ },
+ "en": {
+ "introduction": "Introduction",
+ "main_features": "Main Features",
+ "usage_instructions": "Usage Instructions",
+ "troubleshooting": "Troubleshooting",
+ "faq": "Frequently Asked Questions"
+ },
+ "pt": {
+ "introduction": "Introdução",
+ "main_features": "Características Principais",
+ "usage_instructions": "Instruções de Uso",
+ "troubleshooting": "Solução de Problemas",
+ "faq": "Perguntas Frequentes"
+ }
+}
+
+PDF_MANUAL_SECTIONS = PDF_MANUAL_SECTIONS_TRANSLATIONS["es"]
+
+PDF_MANUAL_SECTION_ORDER = [
+ "introduction",
+ "main_features",
+ "usage_instructions",
+ "troubleshooting",
+ "faq"
+]
+
+
+def get_sections_for_language(language: str = "es") -> dict:
+ return PDF_MANUAL_SECTIONS_TRANSLATIONS.get(language, PDF_MANUAL_SECTIONS_TRANSLATIONS["es"])
diff --git a/app/controllers/handle_controller.py b/app/controllers/handle_controller.py
index c819b7b..df4b513 100644
--- a/app/controllers/handle_controller.py
+++ b/app/controllers/handle_controller.py
@@ -1,14 +1,51 @@
-from app.requests.recommend_product_request import RecommendProductRequest
-from fastapi import APIRouter, Depends, Request
+import base64
+import httpx
+from app.requests.brand_context_resolver_request import BrandContextResolverRequest
+from app.requests.copy_request import CopyRequest
+from app.requests.direct_scrape_request import DirectScrapeRequest
+from app.requests.generate_image_request import GenerateImageRequest
+from app.requests.generate_pdf_request import GeneratePdfRequest
+from app.requests.recommend_product_request import RecommendProductRequest
+from app.requests.resolve_funnel_request import ResolveFunnelRequest
+from fastapi import APIRouter, Depends, Request, HTTPException
from app.requests.message_request import MessageRequest
+from app.requests.variation_image_request import VariationImageRequest
+from app.requests.product_scraping_request import ProductScrapingRequest
+from app.services.image_service_interface import ImageServiceInterface
from app.services.message_service_interface import MessageServiceInterface
+from app.services.product_scraping_service_interface import ProductScrapingServiceInterface
+from app.middlewares.auth_middleware import require_auth, require_api_key
+from app.requests.generate_video_request import GenerateVideoRequest
+
+# Importaciones para Dropi
+from app.services.dropi_service_interface import DropiServiceInterface
+from app.services.dropi_service import DropiService
+from app.services.video_service_interface import VideoServiceInterface
+from app.services.video_service import VideoService
+from app.services.audio_service_interface import AudioServiceInterface
+from app.services.audio_service import AudioService
+from app.requests.generate_audio_request import GenerateAudioRequest
router = APIRouter(
prefix="/api/ms/conversational-engine",
tags=["conversational-agent"]
)
+@router.get("/integration/dropi/departments")
+async def get_departments(
+ country: str = "co",
+ service: DropiServiceInterface = Depends(DropiService)
+):
+ return await service.get_departments(country)
+
+@router.get("/integration/dropi/departments/{department_id}/cities")
+async def get_cities_by_department(
+ department_id: int,
+ country: str = "co",
+ service: DropiServiceInterface = Depends(DropiService)
+):
+ return await service.get_cities_by_department(department_id, country)
@router.post("/handle-message")
async def handle_message(
@@ -19,6 +56,15 @@ async def handle_message(
return response
+@router.post("/handle-message-json")
+async def handle_message(
+ request: MessageRequest,
+ message_service: MessageServiceInterface = Depends()
+):
+ response = await message_service.handle_message_json(request)
+ return response
+
+
@router.post("/recommend-product")
async def recommend_products(
request: RecommendProductRequest,
@@ -28,6 +74,152 @@ async def recommend_products(
return response
+@router.post("/generate-pdf")
+async def generate_pdf(
+ request: GeneratePdfRequest,
+ message_service: MessageServiceInterface = Depends()
+):
+ response = await message_service.generate_pdf(request)
+ return response
+
+
+@router.post("/generate-variation-images")
+@require_auth
+async def generate_variation_images(
+ request: Request,
+ variation_request: VariationImageRequest,
+ service: ImageServiceInterface = Depends()
+):
+ user_info = request.state.user_info
+ response = await service.generate_variation_images(variation_request, user_info.get("data", {}).get("id"))
+ return response
+
+
+@router.post("/generate-images-from")
+@require_auth
+async def generate_images_from(
+ request: Request,
+ generate_image_request: GenerateImageRequest,
+ service: ImageServiceInterface = Depends()
+):
+ if not generate_image_request.file and generate_image_request.file_url:
+ async with httpx.AsyncClient() as client:
+ try:
+ response = await client.get(generate_image_request.file_url)
+ response.raise_for_status()
+ generate_image_request.file = base64.b64encode(response.content).decode()
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=f"Error for get file: {str(e)}")
+
+ user_info = request.state.user_info
+ response = await service.generate_images_from(generate_image_request, user_info.get("data", {}).get("id"))
+ return response
+
+
+@router.post("/generate-images-from/api-key")
+@require_api_key
+async def generate_images_from_api_key(
+ request: Request,
+ generate_image_request: GenerateImageRequest,
+ service: ImageServiceInterface = Depends()
+):
+ if not generate_image_request.file and generate_image_request.file_url:
+ async with httpx.AsyncClient() as client:
+ try:
+ response = await client.get(generate_image_request.file_url)
+ response.raise_for_status()
+ generate_image_request.file = base64.b64encode(response.content).decode()
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=f"Error for get file: {str(e)}")
+ response = await service.generate_images_from(generate_image_request, generate_image_request.owner_id)
+ return response
+
+
+@router.post("/generate-images-from-agent/api-key")
+@require_api_key
+async def generate_images_from_agent_api_key(
+ request: Request,
+ generate_image_request: GenerateImageRequest,
+ service: ImageServiceInterface = Depends()
+):
+ if not generate_image_request.file and generate_image_request.file_url:
+ async with httpx.AsyncClient() as client:
+ try:
+ response = await client.get(generate_image_request.file_url)
+ response.raise_for_status()
+ generate_image_request.file = base64.b64encode(response.content).decode()
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=f"Error for get file: {str(e)}")
+ response = await service.generate_images_from_agent(generate_image_request, generate_image_request.owner_id)
+ return response
+
+
+@router.post("/generate-copies")
+async def generate_copies(
+ copy_request: CopyRequest,
+ message_service: MessageServiceInterface = Depends()
+):
+ response = await message_service.generate_copies(copy_request)
+ return response
+
+
+@router.post("/scrape-product")
+@require_auth
+async def scrape_product(
+ request: Request,
+ scraping_request: ProductScrapingRequest,
+ service: ProductScrapingServiceInterface = Depends()
+):
+ response = await service.scrape_product(scraping_request)
+ return response
+
+@router.post("/scrape-direct-html")
+@require_auth
+async def scrape_product_direct(
+ request: Request,
+ scraping_request: DirectScrapeRequest,
+ service: ProductScrapingServiceInterface = Depends()
+):
+ response = await service.scrape_direct(scraping_request.html)
+ return response
+
+
+@router.post("/resolve-info-funnel")
+async def resolve_funnel(
+ request: ResolveFunnelRequest,
+ message_service: MessageServiceInterface = Depends()
+):
+ response = await message_service.resolve_funnel(request)
+ return response
+
+@router.post("/store/brand-context-resolver")
+@require_auth
+async def brand_context_resolver(
+ request: Request,
+ requestBrand: BrandContextResolverRequest,
+ message_service: MessageServiceInterface = Depends()
+):
+ response = await message_service.resolve_brand_context(requestBrand)
+ return response
+
+@router.post("/generate-video")
+async def generate_video(
+ request: Request,
+ requestGenerateVideo: GenerateVideoRequest,
+ video_service: VideoServiceInterface = Depends(VideoService)
+):
+ return await video_service.generate_video(requestGenerateVideo)
+
+
+@router.post("/generate-audio")
+async def generate_audio(
+ request: Request,
+ requestGenerateAudio: GenerateAudioRequest,
+ audio_service: AudioServiceInterface = Depends(AudioService)
+):
+ return await audio_service.generate_audio(requestGenerateAudio)
+
+
@router.get("/health")
async def health_check():
return {"status": "OK"}
diff --git a/app/externals/agent_config/agent_config_client.py b/app/externals/agent_config/agent_config_client.py
index bfa44b5..e1a177d 100644
--- a/app/externals/agent_config/agent_config_client.py
+++ b/app/externals/agent_config/agent_config_client.py
@@ -11,7 +11,7 @@ async def get_agent(data: AgentConfigRequest) -> AgentConfigResponse:
headers = {'Content-Type': 'application/json'}
async with httpx.AsyncClient() as client:
- response = await client.post(url, json=data.dict(), headers=headers)
+ response = await client.post(url, json=data.model_dump(), headers=headers)
response.raise_for_status()
return AgentConfigResponse(**response.json())
diff --git a/app/externals/agent_config/requests/agent_config_request.py b/app/externals/agent_config/requests/agent_config_request.py
index a91d1ca..f892dcc 100644
--- a/app/externals/agent_config/requests/agent_config_request.py
+++ b/app/externals/agent_config/requests/agent_config_request.py
@@ -1,9 +1,11 @@
from typing import List, Dict, Optional, Any
from pydantic import BaseModel
+from app.requests.message_request import MetadataFilter
+
class AgentConfigRequest(BaseModel):
agent_id: Optional[str] = None
query: str
- metadata_filter: Optional[List[Dict[str, str]]] = None
+ metadata_filter: Optional[List[MetadataFilter]] = None
parameter_prompt: Optional[Dict[str, Any]] = None
diff --git a/app/externals/agent_config/responses/agent_config_response.py b/app/externals/agent_config/responses/agent_config_response.py
index 63a8da5..4d39d4d 100644
--- a/app/externals/agent_config/responses/agent_config_response.py
+++ b/app/externals/agent_config/responses/agent_config_response.py
@@ -1,11 +1,12 @@
-from typing import Optional, Dict, List
-from pydantic import BaseModel
+from typing import Optional, Dict, List, Any
+from pydantic import BaseModel, Field
class AgentPreferences(BaseModel):
- temperature: float
- max_tokens: int
- top_p: float
+ temperature: float = 0.7
+ max_tokens: int = 1000
+ top_p: float = 1.0
+ extra_parameters: Optional[Dict[str, Any]] = None
class Property(BaseModel):
@@ -43,4 +44,5 @@ class AgentConfigResponse(BaseModel):
provider_ai: str
model_ai: str
preferences: AgentPreferences
- tools: Optional[List[dict]]
+ tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list)
+ mcp_config: Optional[Dict[str, Any]] = None
diff --git a/app/externals/aliexpress/aliexpress_client.py b/app/externals/aliexpress/aliexpress_client.py
index eaeb8d4..8a758ce 100644
--- a/app/externals/aliexpress/aliexpress_client.py
+++ b/app/externals/aliexpress/aliexpress_client.py
@@ -28,3 +28,28 @@ async def search_products(data: AliexpressSearchRequest) -> AliexpressSearchResp
response.raise_for_status()
return AliexpressSearchResponse(**response.json())
+
+
+async def get_item_detail(item_id: str):
+ endpoint = '/item_detail_6'
+ url = f"{RAPIDAPI_HOST}{endpoint}"
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-rapidapi-host': 'aliexpress-datahub.p.rapidapi.com',
+ 'x-rapidapi-key': RAPIDAPI_KEY
+ }
+
+ params = {
+ 'itemId': item_id
+ }
+
+ async with httpx.AsyncClient() as client:
+ response = await client.get(
+ url,
+ params=params,
+ headers=headers
+ )
+ response.raise_for_status()
+
+ return response.json()
diff --git a/app/externals/amazon/amazon_client.py b/app/externals/amazon/amazon_client.py
new file mode 100644
index 0000000..b44d76b
--- /dev/null
+++ b/app/externals/amazon/amazon_client.py
@@ -0,0 +1,59 @@
+import httpx
+from app.configurations.config import RAPIDAPI_KEY
+from app.externals.amazon.requests.amazon_search_request import AmazonSearchRequest
+from app.externals.amazon.responses.amazon_search_response import AmazonSearchResponse
+from typing import Dict, Any
+
+
+async def search_products(request: AmazonSearchRequest) -> AmazonSearchResponse:
+ headers = {
+ 'x-rapidapi-host': 'real-time-amazon-data.p.rapidapi.com',
+ 'x-rapidapi-key': RAPIDAPI_KEY
+ }
+
+ params = {
+ 'query': request.query,
+ 'page': '1',
+ 'country': 'US',
+ 'sort_by': 'RELEVANCE',
+ 'product_condition': 'ALL',
+ 'is_prime': 'false',
+ 'deals_and_discounts': 'NONE'
+ }
+
+ async with httpx.AsyncClient() as client:
+ response = await client.get(
+ 'https://real-time-amazon-data.p.rapidapi.com/search',
+ headers=headers,
+ params=params
+ )
+
+ if response.status_code != 200:
+ raise Exception(f"Error en la llamada a Amazon API: {response.status_code}")
+
+ raw_response = response.json()
+ return AmazonSearchResponse(raw_response)
+
+
+async def get_product_details(asin: str, country: str = "US") -> Dict[str, Any]:
+ headers = {
+ 'x-rapidapi-host': 'real-time-amazon-data.p.rapidapi.com',
+ 'x-rapidapi-key': RAPIDAPI_KEY
+ }
+
+ params = {
+ 'asin': asin,
+ 'country': country
+ }
+
+ async with httpx.AsyncClient() as client:
+ response = await client.get(
+ 'https://real-time-amazon-data.p.rapidapi.com/product-details',
+ headers=headers,
+ params=params
+ )
+
+ if response.status_code != 200:
+ raise Exception(f"Error with call Amazon RapidApi: {response.status_code}")
+
+ return response.json()
diff --git a/app/externals/amazon/requests/amazon_search_request.py b/app/externals/amazon/requests/amazon_search_request.py
new file mode 100644
index 0000000..6a44190
--- /dev/null
+++ b/app/externals/amazon/requests/amazon_search_request.py
@@ -0,0 +1,8 @@
+
+
+class AmazonSearchRequest:
+ def __init__(
+ self,
+ query: str,
+ ):
+ self.query = query
diff --git a/app/externals/amazon/responses/amazon_search_response.py b/app/externals/amazon/responses/amazon_search_response.py
new file mode 100644
index 0000000..36509ff
--- /dev/null
+++ b/app/externals/amazon/responses/amazon_search_response.py
@@ -0,0 +1,42 @@
+from dataclasses import dataclass
+from typing import List, Optional
+
+
+@dataclass
+class AmazonProduct:
+ asin: str
+ title: str
+ price: float
+ image_url: str
+ product_url: str
+
+
+class AmazonSearchResponse:
+ def __init__(self, raw_response: dict):
+ self.raw_response = raw_response
+
+ def get_products(self) -> List[dict]:
+ products = []
+
+ for item in self.raw_response.get('data', {}).get('products', []):
+ price = self._format_price(item.get('product_price'))
+ if price is not None and price > 0:
+ product = {
+ "source": "amazon",
+ "external_id": item.get('asin', ''),
+ "name": item.get('product_title', ''),
+ "url_website": item.get('product_url', ''),
+ "url_image": item.get('product_photo', ''),
+ "price": price
+ }
+ products.append(product)
+
+ return products
+
+ def _format_price(self, price) -> Optional[float]:
+ if not price:
+ return None
+ try:
+ return float(str(price).replace('$', '').replace(',', ''))
+ except (ValueError, TypeError):
+ return None
diff --git a/app/externals/dropi/__init__.py b/app/externals/dropi/__init__.py
new file mode 100644
index 0000000..0519ecb
--- /dev/null
+++ b/app/externals/dropi/__init__.py
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/app/externals/dropi/dropi_client.py b/app/externals/dropi/dropi_client.py
new file mode 100644
index 0000000..489e818
--- /dev/null
+++ b/app/externals/dropi/dropi_client.py
@@ -0,0 +1,62 @@
+import httpx
+from typing import Dict, Any
+
+from app.configurations.config import DROPI_HOST, get_dropi_api_key
+
+
+async def get_product_details(product_id: str, country: str = "co") -> Dict[str, Any]:
+ headers = {
+ "dropi-integration-key": get_dropi_api_key(country)
+ }
+
+ dropi_host = DROPI_HOST.replace(".co", f".{country}")
+ url = f"{dropi_host}/integrations/products/v2/{product_id}"
+
+ async with httpx.AsyncClient() as client:
+ try:
+ response = await client.get(url, headers=headers)
+ response.raise_for_status()
+ return response.json()
+ except httpx.HTTPStatusError as e:
+ raise Exception(f"API request failed with status {e.response.status_code}: {e.response.text}")
+ except httpx.RequestError as e:
+ raise Exception(f"API request failed: {str(e)}")
+
+
+async def get_departments(country: str = "co") -> Dict[str, Any]:
+ headers = {
+ "dropi-integration-key": get_dropi_api_key(country)
+ }
+ dropi_host = DROPI_HOST.replace(".co", f".{country}")
+ url = f"{dropi_host}/integrations/department"
+ async with httpx.AsyncClient() as client:
+ try:
+ response = await client.get(url, headers=headers)
+ response.raise_for_status()
+ return response.json()
+ except httpx.HTTPStatusError as e:
+ raise Exception(f"API request failed with status {e.response.status_code}: {e.response.text}")
+ except httpx.RequestError as e:
+ raise Exception(f"API request failed: {str(e)}")
+
+
+async def get_cities_by_department(department_id: int, rate_type: str, country: str = "co") -> Dict[str, Any]:
+ headers = {
+ "dropi-integration-key": get_dropi_api_key(country),
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "department_id": department_id,
+ "rate_type": rate_type
+ }
+ dropi_host = DROPI_HOST.replace(".co", f".{country}")
+ url = f"{dropi_host}/integrations/trajectory/bycity"
+ async with httpx.AsyncClient() as client:
+ try:
+ response = await client.post(url, headers=headers, json=payload)
+ response.raise_for_status()
+ return response.json()
+ except httpx.HTTPStatusError as e:
+ raise Exception(f"API request failed with status {e.response.status_code}: {e.response.text}")
+ except httpx.RequestError as e:
+ raise Exception(f"API request failed: {str(e)}")
\ No newline at end of file
diff --git a/app/externals/fal/__init__.py b/app/externals/fal/__init__.py
new file mode 100644
index 0000000..526f30b
--- /dev/null
+++ b/app/externals/fal/__init__.py
@@ -0,0 +1 @@
+# Package initializer for FAL externals
\ No newline at end of file
diff --git a/app/externals/fal/fal_client.py b/app/externals/fal/fal_client.py
new file mode 100644
index 0000000..36bf7a3
--- /dev/null
+++ b/app/externals/fal/fal_client.py
@@ -0,0 +1,47 @@
+import urllib.parse
+from typing import Optional, Dict, Any
+
+import httpx
+
+from app.configurations.config import FAL_AI_API_KEY
+
+
+class FalClient:
+ def __init__(self, api_key: Optional[str] = None):
+ self.api_key = api_key or FAL_AI_API_KEY
+
+ async def _post(self, path: str, payload: Dict[str, Any], fal_webhook: Optional[str] = None) -> Dict[str, Any]:
+ if not self.api_key:
+ raise ValueError("FAL_AI_API_KEY no configurada")
+
+ base_url = f"https://queue.fal.run/{path}"
+ if fal_webhook:
+ query = f"fal_webhook={urllib.parse.quote_plus(fal_webhook)}"
+ url = f"{base_url}?{query}"
+ else:
+ url = base_url
+
+ headers = {
+ "Authorization": f"Key {self.api_key}",
+ "Content-Type": "application/json",
+ }
+
+ async with httpx.AsyncClient(timeout=60) as client:
+ response = await client.post(url, json=payload, headers=headers)
+ response.raise_for_status()
+ return response.json()
+
+ async def tts_multilingual_v2(self, text: str, fal_webhook: Optional[str] = None, **kwargs) -> Dict[str, Any]:
+ payload = {"text": text}
+ payload.update(kwargs)
+ return await self._post("fal-ai/elevenlabs/tts/multilingual-v2", payload, fal_webhook)
+
+ async def bytedance_omnihuman(self, image_url: str, audio_url: str, fal_webhook: Optional[str] = None, **kwargs) -> Dict[str, Any]:
+ payload = {"image_url": image_url, "audio_url": audio_url}
+ payload.update(kwargs)
+ return await self._post("fal-ai/bytedance/omnihuman", payload, fal_webhook)
+
+ async def kling_image_to_video(self, prompt: str, image_url: str, fal_webhook: Optional[str] = None, **kwargs) -> Dict[str, Any]:
+ payload = {"prompt": prompt, "image_url": image_url}
+ payload.update(kwargs)
+ return await self._post("fal-ai/kling-video/v2/master/image-to-video", payload, fal_webhook)
\ No newline at end of file
diff --git a/app/externals/google_vision/google_vision_client.py b/app/externals/google_vision/google_vision_client.py
new file mode 100644
index 0000000..21112d3
--- /dev/null
+++ b/app/externals/google_vision/google_vision_client.py
@@ -0,0 +1,57 @@
+import aiohttp
+from app.configurations.config import GOOGLE_VISION_API_KEY
+from app.externals.google_vision.responses.vision_analysis_response import VisionAnalysisResponse
+
+
+async def analyze_image(image_base64: str) -> VisionAnalysisResponse:
+ vision_api_url = f"https://vision.googleapis.com/v1/images:annotate?key={GOOGLE_VISION_API_KEY}"
+
+ payload = {
+ "requests": [{
+ "image": {
+ "content": image_base64
+ },
+ "features": [
+ {
+ "type": "LABEL_DETECTION",
+ "maxResults": 3
+ },
+ {
+ "type": "LOGO_DETECTION",
+ "maxResults": 1
+ }
+ ]
+ }]
+ }
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ vision_api_url,
+ json=payload,
+ headers={"Content-Type": "application/json"}
+ ) as response:
+ if response.status != 200:
+ raise Exception(f"Error en Google Vision API: {await response.text()}")
+
+ data = await response.json()
+
+ logo_description = ""
+ if data["responses"][0].get("logoAnnotations"):
+ logo = data["responses"][0]["logoAnnotations"][0]
+ if logo.get("score", 0) > 0.65:
+ logo_description = logo["description"]
+
+ labels = []
+ if data["responses"][0].get("labelAnnotations"):
+ labels = [
+ label["description"]
+ for label in data["responses"][0]["labelAnnotations"]
+ if label.get("score", 0) > 0.65
+ ]
+
+ label_description = ", ".join(labels)
+
+ return VisionAnalysisResponse(
+ logo_description=logo_description,
+ label_description=label_description
+ )
diff --git a/app/externals/google_vision/responses/vision_analysis_response.py b/app/externals/google_vision/responses/vision_analysis_response.py
new file mode 100644
index 0000000..96dc5f4
--- /dev/null
+++ b/app/externals/google_vision/responses/vision_analysis_response.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class VisionAnalysisResponse:
+ logo_description: str
+ label_description: str
+
+ def get_analysis_text(self) -> str:
+ analysis_parts = []
+
+ if self.logo_description:
+ analysis_parts.append(f"Detected logos: {self.logo_description}")
+
+ if self.label_description:
+ analysis_parts.append(f"Detected category: {self.label_description}")
+
+ return ". ".join(analysis_parts) + ("." if analysis_parts else "")
diff --git a/app/externals/images/image_client.py b/app/externals/images/image_client.py
new file mode 100644
index 0000000..c579af2
--- /dev/null
+++ b/app/externals/images/image_client.py
@@ -0,0 +1,213 @@
+import base64
+import mimetypes
+from typing import Optional
+import os
+
+import aiohttp
+import asyncio
+import httpx
+import base64
+
+import requests
+
+from app.configurations import config
+from app.configurations.config import REPLICATE_API_KEY, GOOGLE_GEMINI_API_KEY, OPENAI_API_KEY
+
+
+async def generate_image_variation(
+ image_url: str,
+ prompt: str,
+ aspect_ratio: str = "1:1",
+ output_format: str = "webp",
+ output_quality: int = 80,
+ prompt_upsampling: bool = False,
+ safety_tolerance: int = 2
+) -> bytes:
+ payload = {
+ "input": {
+ "aspect_ratio": aspect_ratio,
+ "image_prompt": image_url,
+ "output_format": output_format,
+ "output_quality": output_quality,
+ "prompt": prompt,
+ "prompt_upsampling": prompt_upsampling,
+ "safety_tolerance": safety_tolerance
+ }
+ }
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ "https://api.replicate.com/v1/models/black-forest-labs/flux-1.1-pro/predictions",
+ headers={
+ "Authorization": f"Bearer {REPLICATE_API_KEY}",
+ "Content-Type": "application/json"
+ },
+ json=payload
+ ) as response:
+ if response.status == 200 or response.status == 201:
+ prediction_data = await response.json()
+
+ while True:
+ async with session.get(
+ prediction_data["urls"]["get"],
+ headers={"Authorization": f"Bearer {REPLICATE_API_KEY}"}
+ ) as status_response:
+ status_data = await status_response.json()
+ if status_data["status"] == "succeeded":
+ image_url = status_data["output"]
+ async with session.get(image_url) as img_response:
+ if img_response.status == 200:
+ return await img_response.read()
+ else:
+ raise Exception(f"Error downloading image: {img_response.status}")
+ elif status_data["status"] == "failed":
+ raise Exception("Image Generation Failed")
+
+ await asyncio.sleep(1)
+ else:
+ raise Exception(f"Error {response.status}: {await response.text()}")
+
+
+def _build_image_part(image_base64: str, is_model_25: bool) -> dict:
+ if is_model_25:
+ return {
+ "inlineData": {
+ "mimeType": 'image/jpeg',
+ "data": image_base64
+ }
+ }
+ return {
+ "inline_data": {
+ "mime_type": 'image/jpeg',
+ "data": image_base64
+ }
+ }
+
+
+async def _fetch_and_encode_images(image_urls: list[str], is_model_25: bool) -> list[dict]:
+ parts = []
+ async with aiohttp.ClientSession() as fetch_session:
+ for image_url in image_urls:
+ try:
+ async with fetch_session.get(image_url) as img_response:
+ if img_response.status == 200:
+ image_bytes = await img_response.read()
+ image_base64 = base64.b64encode(image_bytes).decode('utf-8')
+ parts.append(_build_image_part(image_base64, is_model_25))
+ except Exception as e:
+ print(f"Error al procesar imagen de {image_url}: {str(e)}")
+ continue
+ return parts
+
+
+def _build_generation_config(is_model_25: bool, aspect_ratio: str, image_size: str) -> dict:
+ config = {"responseModalities": ["Text", "Image"]}
+ if not is_model_25:
+ config["imageConfig"] = {
+ "aspectRatio": aspect_ratio,
+ "imageSize": image_size
+ }
+ return config
+
+
+async def google_image(image_urls: list[str], prompt: str, model_ia: Optional[str] = None, extra_params: Optional[dict] = None) -> bytes:
+ if extra_params is None:
+ extra_params = {}
+
+ is_model_25 = model_ia and '2.5' in model_ia
+ aspect_ratio = extra_params.get('aspect_ratio', '1:1')
+ image_size = extra_params.get('image_size', '1K')
+
+ model_name = 'gemini-2.5-flash-image-preview' if is_model_25 else 'gemini-3-pro-image-preview'
+ url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_name}:generateContent?key={GOOGLE_GEMINI_API_KEY}"
+
+ parts = [{"text": prompt}]
+
+ if image_urls:
+ image_parts = await _fetch_and_encode_images(image_urls, is_model_25)
+ parts.extend(image_parts)
+
+ payload = {
+ "contents": [{"parts": parts}],
+ "generationConfig": _build_generation_config(is_model_25, aspect_ratio, image_size)
+ }
+
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.post(url, headers=headers, json=payload) as response:
+ if response.status == 200:
+ data = await response.json()
+ parts = data["candidates"][0]["content"]["parts"]
+
+ for part in parts:
+ if "inlineData" in part:
+ img_data_base64 = part["inlineData"]["data"]
+ img_bytes = base64.b64decode(img_data_base64)
+ return img_bytes
+
+ raise Exception("No se generó ninguna imagen en la respuesta de Google Gemini")
+ else:
+ error_text = await response.text()
+ print(f"Error {response.status}: {error_text}")
+ response.raise_for_status()
+ except Exception as e:
+ print(f"Error al generar imagen con Google Gemini: {str(e)}")
+ raise Exception(f"Error al generar imagen con Google Gemini: {str(e)}")
+
+
+async def openai_image_edit(image_urls: list[str], prompt: str, model_ia: Optional[str] = None, extra_params: Optional[dict] = None) -> bytes:
+ url = "https://api.openai.com/v1/images/edits"
+ headers = {
+ "Authorization": f"Bearer {config.OPENAI_API_KEY}"
+ }
+ data = aiohttp.FormData()
+
+ async with aiohttp.ClientSession() as fetch_session:
+ for image_url in image_urls:
+ async with fetch_session.get(image_url) as img_response:
+ if img_response.status == 200:
+ image_bytes = await img_response.read()
+ filename = os.path.basename(image_url)
+ content_type = mimetypes.guess_type(filename)[0] or 'image/jpeg'
+ data.add_field(
+ 'image[]',
+ image_bytes,
+ filename=filename,
+ content_type=content_type
+ )
+
+ prompt = prompt + ". **escena completa visible, composición centrada, todos los elementos dentro del marco cuadrado, nada recortado en los bordes, composición completa**"
+
+ if extra_params is None:
+ extra_params = {}
+
+ size = extra_params.get('resolution', '1024x1024') or '1024x1024'
+
+ data.add_field('size', size)
+ data.add_field('prompt', prompt)
+ data.add_field('model', 'gpt-image-1')
+ data.add_field('n', '1')
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.post(url, headers=headers, data=data) as response:
+ if response.status == 200:
+ result = await response.json()
+ if "data" in result and len(result["data"]) > 0 and "b64_json" in result["data"][0]:
+ b64_image = result["data"][0]["b64_json"]
+ image_bytes = base64.b64decode(b64_image)
+ return image_bytes
+ else:
+ raise Exception(f"Respuesta inesperada de la API de OpenAI: {result}")
+ else:
+ error_text = await response.text()
+ print(f"Error {response.status}: {error_text}")
+ response.raise_for_status()
+ except aiohttp.ClientError as e:
+ print(f"Error red al generar imagen: {str(e)}")
+ raise Exception(f"Error de red al llamar a OpenAI: {e}") from e
+ except Exception as e:
+ print(f"Error al generar imagen: {str(e)}")
+ raise Exception(f"Error al editar imagen con OpenAI: {e}") from e
diff --git a/app/externals/s3_upload/requests/s3_upload_request.py b/app/externals/s3_upload/requests/s3_upload_request.py
new file mode 100644
index 0000000..1df1f47
--- /dev/null
+++ b/app/externals/s3_upload/requests/s3_upload_request.py
@@ -0,0 +1,7 @@
+from pydantic import BaseModel
+
+
class S3UploadRequest(BaseModel):
    """Payload posted to the S3 upload lambda (see s3_upload_client.upload_file)."""
    # File content as a string — presumably base64-encoded; confirm against the lambda's contract.
    file: str
    # Destination folder (key prefix) inside the bucket.
    folder: str
    # Object name to store the file under.
    filename: str
diff --git a/app/externals/s3_upload/responses/s3_upload_response.py b/app/externals/s3_upload/responses/s3_upload_response.py
new file mode 100644
index 0000000..0d2ba00
--- /dev/null
+++ b/app/externals/s3_upload/responses/s3_upload_response.py
@@ -0,0 +1,5 @@
+from pydantic import BaseModel
+
+
class S3UploadResponse(BaseModel):
    """Response returned by the S3 upload lambda."""
    # URL of the object that was stored.
    s3_url: str
diff --git a/app/externals/s3_upload/s3_upload_client.py b/app/externals/s3_upload/s3_upload_client.py
new file mode 100644
index 0000000..a87dd25
--- /dev/null
+++ b/app/externals/s3_upload/s3_upload_client.py
@@ -0,0 +1,36 @@
+import httpx
+from app.configurations.config import S3_UPLOAD_API
+from app.externals.s3_upload.requests.s3_upload_request import S3UploadRequest
+from app.externals.s3_upload.responses.s3_upload_response import S3UploadResponse
+
+
async def upload_file(request: S3UploadRequest) -> S3UploadResponse:
    """POST the upload payload to the S3 upload lambda and parse its response.

    Args:
        request: Upload payload (file, folder, filename) serialized as JSON.

    Returns:
        S3UploadResponse holding the resulting S3 URL.

    Raises:
        Exception: wraps any network/HTTP/parsing failure, with the original
            exception chained so the real cause is preserved.
    """
    headers = {
        "Content-Type": "application/json"
    }

    # Generous timeouts: the payload may carry a large (base64?) file body — TODO confirm sizes.
    timeout = httpx.Timeout(timeout=180.0, connect=60.0)

    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.post(
                S3_UPLOAD_API,
                headers=headers,
                json=request.dict()
            )
            response.raise_for_status()
            return S3UploadResponse(**response.json())
    except Exception as e:
        print(f"Error al cargar archivo a S3: {str(e)}")
        # Chain the original exception instead of discarding it.
        raise Exception(f"Error al cargar archivo a S3: {str(e)}") from e
+
+
async def check_file_exists_direct(s3_url: str) -> bool:
    """Best-effort existence check: HEAD the URL and report a 200 response.

    Any transport error or non-200 status is treated as "file does not exist";
    this function never raises.
    """
    timeout = httpx.Timeout(timeout=10.0)

    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.head(s3_url)
            return response.status_code == 200
    except Exception:
        # Deliberate best-effort: unreachable host / timeout counts as missing.
        return False
diff --git a/app/externals/scraperapi/__init__.py b/app/externals/scraperapi/__init__.py
new file mode 100644
index 0000000..fdb4450
--- /dev/null
+++ b/app/externals/scraperapi/__init__.py
@@ -0,0 +1 @@
+# Inicialización del paquete scraperapi
\ No newline at end of file
diff --git a/app/externals/scraperapi/scraperapi_client.py b/app/externals/scraperapi/scraperapi_client.py
new file mode 100644
index 0000000..29e548e
--- /dev/null
+++ b/app/externals/scraperapi/scraperapi_client.py
@@ -0,0 +1,48 @@
+import aiohttp
+from typing import Dict, Any
+
+from fastapi import HTTPException
+
+from app.configurations.config import SCRAPERAPI_KEY, URL_SCRAPER_LAMBDA
+
+
class ScraperAPIClient:
    """Async client for fetching rendered HTML, either through the ScraperAPI
    service or through an internal scraper lambda."""

    def __init__(self):
        # Credentials and endpoints come from app configuration.
        self.api_key = SCRAPERAPI_KEY
        self.base_url = "http://api.scraperapi.com"
        self.lambda_url = URL_SCRAPER_LAMBDA

    async def get_html(self, url: str, params: Dict[str, Any] = None) -> str:
        """Fetch *url* through ScraperAPI and return the raw HTML body.

        Args:
            url: Target page to scrape.
            params: Extra ScraperAPI query parameters; merged last, so they may
                override the default ``api_key``/``url`` entries.

        Raises:
            HTTPException: status 400 carrying ScraperAPI's error body when the
                upstream response is not 200.
        """
        default_params = {
            "api_key": self.api_key,
            "url": url
        }

        if params:
            default_params.update(params)

        async with aiohttp.ClientSession() as session:
            async with session.get(self.base_url, params=default_params) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise HTTPException(status_code=400, detail=error_text)

                return await response.text()

    async def get_html_lambda(self, url: str) -> str:
        """Fetch *url* through the internal scraper lambda.

        Returns the lambda's ``content`` field, or "" when it is absent.

        Raises:
            HTTPException: status 400 when the lambda responds with non-200.
        """
        payload = {
            "url": url
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.lambda_url,
                headers={"Content-Type": "application/json"},
                json=payload
            ) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise HTTPException(status_code=400, detail=f"Error lambda API scraper: {error_text}")

                response_data = await response.json()
                return response_data.get("content", "")
diff --git a/app/factories/ai_provider_factory.py b/app/factories/ai_provider_factory.py
index 34e3c91..74fbb20 100644
--- a/app/factories/ai_provider_factory.py
+++ b/app/factories/ai_provider_factory.py
@@ -1,6 +1,7 @@
from app.providers.ai_provider_interface import AIProviderInterface
from app.providers.anthropic_provider import AnthropicProvider
from app.providers.deepseek_provider import DeepseekProvider
+from app.providers.gemini_provider import GeminiProvider
from app.providers.openai_provider import OpenAIProvider
@@ -13,5 +14,7 @@ def get_provider(provider_name: str) -> AIProviderInterface:
return AnthropicProvider()
elif provider_name == "deepseek":
return DeepseekProvider()
+ elif provider_name == "gemini":
+ return GeminiProvider()
else:
raise ValueError(f"El proveedor de AI '{provider_name}' no está implementado")
\ No newline at end of file
diff --git a/app/factories/scraping_factory.py b/app/factories/scraping_factory.py
new file mode 100644
index 0000000..6487233
--- /dev/null
+++ b/app/factories/scraping_factory.py
@@ -0,0 +1,30 @@
+from urllib.parse import urlparse
+
+from fastapi import Depends
+
+from app.scrapers.scraper_interface import ScraperInterface
+from app.scrapers.amazon_scraper import AmazonScraper
+from app.scrapers.aliexpress_scraper import AliexpressScraper
+from app.scrapers.cj_scraper import CJScraper
+from app.scrapers.dropi_scraper import DropiScraper
+from app.scrapers.ia_scraper import IAScraper
+from app.services.message_service_interface import MessageServiceInterface
+
+
class ScrapingFactory:
    """Chooses the scraper implementation that matches a product URL's domain."""

    def __init__(self, message_service: MessageServiceInterface = Depends()):
        # Kept for the AI fallback scraper, which needs messaging support.
        self.message_service = message_service

    def get_scraper(self, url: str, country: str = "co") -> ScraperInterface:
        """Return a scraper for *url*; unknown domains fall back to the AI scraper."""
        host = urlparse(url).netloc.lower()

        # Zero-argument scrapers, checked in priority order.
        simple_scrapers = (
            ("amazon", AmazonScraper),
            ("aliexpress", AliexpressScraper),
            ("cjdropshipping", CJScraper),
        )
        for marker, scraper_cls in simple_scrapers:
            if marker in host:
                return scraper_cls()

        if "dropi" in host:
            return DropiScraper(country=country)

        return IAScraper(message_service=self.message_service)
diff --git a/app/helpers/escape_helper.py b/app/helpers/escape_helper.py
new file mode 100644
index 0000000..9668555
--- /dev/null
+++ b/app/helpers/escape_helper.py
@@ -0,0 +1,56 @@
+import re
+from bs4 import BeautifulSoup
+
+
def clean_placeholders(text: str, allowed_keys: list = None) -> str:
    """Remove ``{placeholder}`` tokens from *text* unless their key is allowed.

    Keys may be quoted inside the braces (``{"name"}``); quotes and spaces are
    stripped before checking membership. Allowed placeholders are kept verbatim.
    """
    keys = [] if allowed_keys is None else allowed_keys
    placeholder_re = re.compile(r"\{\s*[\"']?([^\"'\{\}]+)[\"']?\s*\}")

    def _keep_or_drop(match):
        name = match.group(1).strip('"\' ')  # strip surrounding quotes/spaces
        return match.group(0) if name in keys else ""

    return placeholder_re.sub(_keep_or_drop, text)
+
+
def clean_html_deeply(html_content):
    """Reduce HTML to bare structure: drop non-content tags, strip every
    attribute except img src/alt, and collapse all whitespace runs."""
    soup = BeautifulSoup(html_content, 'html.parser')

    # Remove tags that carry no visible content.
    for junk in soup(['script', 'style', 'noscript', 'svg', 'link', 'meta', 'head']):
        junk.decompose()

    for element in soup.find_all(True):
        if element.name == 'img':
            # Keep only the attributes that identify the image, in fixed order.
            element.attrs = {name: element.attrs[name] for name in ['src', 'alt'] if name in element.attrs}
        else:
            element.attrs = {}

    flattened = str(soup)
    return re.sub(r'\s+', ' ', flattened).strip()
+
def clean_html_less_deeply(html_content):
    """Simplify HTML while keeping attributes useful for later selection.

    Unlike clean_html_deeply, this keeps id/class on all tags plus
    tag-specific attributes (img, a, source, picture), and uses the more
    lenient html5lib parser. Whitespace runs are collapsed to single spaces.
    """
    soup = BeautifulSoup(html_content, 'html5lib')

    # Drop tags that carry no visible content.
    for tag in soup(['script', 'style', 'noscript', 'svg', 'link', 'meta', 'head']):
        tag.decompose()

    # Per-tag attribute whitelists.
    for tag in soup.find_all(True):
        if tag.name == 'img':
            tag.attrs = {key: tag.attrs[key] for key in ['src', 'alt', 'class', 'id', 'title'] if key in tag.attrs}
        elif tag.name == 'a':
            tag.attrs = {key: tag.attrs[key] for key in ['href', 'title', 'target', 'class', 'id'] if key in tag.attrs}
        elif tag.name == 'source':
            tag.attrs = {key: tag.attrs[key] for key in ['media', 'srcset', 'type'] if key in tag.attrs}
        elif tag.name == 'picture':
            tag.attrs = {key: tag.attrs[key] for key in ['id', 'class'] if key in tag.attrs}
        else:
            allowed_common_attrs = ['id', 'class']
            tag.attrs = {key: tag.attrs[key] for key in allowed_common_attrs if key in tag.attrs}

    simplified_html = str(soup)
    simplified_html_clean = re.sub(r'\s+', ' ', simplified_html).strip()

    return simplified_html_clean
\ No newline at end of file
diff --git a/app/helpers/image_compression_helper.py b/app/helpers/image_compression_helper.py
new file mode 100644
index 0000000..922706b
--- /dev/null
+++ b/app/helpers/image_compression_helper.py
@@ -0,0 +1,72 @@
+import io
+import base64
+from PIL import Image
+
+
def compress_image_to_target(original_image_bytes: bytes, target_kb: int = 120) -> str:
    """Compress an image to roughly *target_kb* KB of WEBP; return it base64-encoded.

    Strategy: try quality 80; if still too big, pick a lower starting quality
    from the size ratio and retry up to twice (floor 40); finally, for large
    images, downscale and re-encode at quality 70. Best-effort only — the
    result may still exceed the target.
    """
    img = Image.open(io.BytesIO(original_image_bytes))

    # Preserve transparency where the source has it; otherwise flatten to RGB.
    if img.mode in ("RGBA", "P"):
        img_converted = img.convert("RGBA")
    else:
        img_converted = img.convert("RGB")

    target_bytes = target_kb * 1024

    # First pass at a fixed, fairly high quality.
    output_buffer = io.BytesIO()
    img_converted.save(output_buffer, format='WEBP', quality=80)
    webp_size = len(output_buffer.getvalue())

    if webp_size <= target_bytes:
        return base64.b64encode(output_buffer.getvalue()).decode('utf-8')

    quality = _calculate_initial_quality(webp_size, target_bytes)

    # Up to two quality-reduction attempts, never going below quality 40.
    for attempt in range(2):
        output_buffer = io.BytesIO()
        img_converted.save(output_buffer, format='WEBP', quality=quality)
        compressed_size = len(output_buffer.getvalue())

        if compressed_size <= target_bytes:
            return base64.b64encode(output_buffer.getvalue()).decode('utf-8')

        quality = max(40, quality - 10)

    # Still too large: shrink dimensions (only for images over 1024px on
    # their longest side) and accept whatever size quality 70 produces.
    if compressed_size > target_bytes and max(img_converted.size) > 1024:
        img_resized = _resize_image(img_converted, target_bytes, compressed_size)
        output_buffer = io.BytesIO()
        img_resized.save(output_buffer, format='WEBP', quality=70)
        return base64.b64encode(output_buffer.getvalue()).decode('utf-8')

    # Fall back to the last attempt even though it misses the target.
    return base64.b64encode(output_buffer.getvalue()).decode('utf-8')
+
+
def _calculate_initial_quality(current_size: int, target_size: int) -> int:
    """Pick a starting WEBP quality based on how much shrinkage is required.

    The smaller target/current is, the more aggressive (lower) the quality.
    """
    shrink_ratio = target_size / current_size

    # Ordered thresholds: first one the ratio clears wins.
    for threshold, quality in ((0.8, 75), (0.5, 65), (0.3, 55)):
        if shrink_ratio >= threshold:
            return quality
    return 45
+
+
def _resize_image(img: Image, target_bytes: int, current_bytes: int) -> Image:
    """Scale the image down proportionally to the byte-size ratio, capping the
    longest side at 1920px, and resample with LANCZOS."""
    # File size scales roughly with pixel area, so scale each side by sqrt(ratio).
    shrink = (target_bytes / current_bytes) ** 0.5
    new_width = int(img.width * shrink)
    new_height = int(img.height * shrink)

    limit = 1920
    if new_width > limit or new_height > limit:
        # Clamp the longer side to the limit, preserving aspect ratio.
        if new_width > new_height:
            new_height = int(new_height * limit / new_width)
            new_width = limit
        else:
            new_width = int(new_width * limit / new_height)
            new_height = limit

    return img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
diff --git a/app/managers/conversation_manager.py b/app/managers/conversation_manager.py
index b7e4ab8..46d0df4 100644
--- a/app/managers/conversation_manager.py
+++ b/app/managers/conversation_manager.py
@@ -1,4 +1,5 @@
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Tuple
+from collections import defaultdict
from app.managers.conversation_manager_interface import ConversationManagerInterface
from app.processors.agent_processor import AgentProcessor
from app.processors.simple_processor import SimpleProcessor
@@ -6,16 +7,21 @@
from app.externals.agent_config.responses.agent_config_response import AgentConfigResponse
from app.factories.ai_provider_factory import AIProviderFactory
from app.tools.tool_generator import ToolGenerator
+from app.processors.mcp_processor import MCPProcessor
class ConversationManager(ConversationManagerInterface):
- # TODO HISTORY
- def get_conversation_history(self, conversation_id: str) -> List[str]:
- return []
+ def __init__(self):
+ self.history_store: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
+ self.max_history_length: int = 10
+
+ def get_conversation_history(self, conversation_id: str) -> List[Dict[str, Any]]:
+ if conversation_id:
+ return self.history_store[conversation_id]
+ return []
async def process_conversation(self, request: MessageRequest, agent_config: AgentConfigResponse) -> dict[str, Any]:
ai_provider = AIProviderFactory.get_provider(agent_config.provider_ai)
-
llm = ai_provider.get_llm(
model=agent_config.model_ai,
temperature=agent_config.preferences.temperature,
@@ -23,13 +29,60 @@ async def process_conversation(self, request: MessageRequest, agent_config: Agen
top_p=agent_config.preferences.top_p
)
- history = self.get_conversation_history(request.conversation_id) or []
- tools = ToolGenerator.generate_tools(agent_config.tools)
+ history = self.get_conversation_history(request.conversation_id)
+ is_simple = False
+
+ if agent_config.mcp_config:
+ processor = MCPProcessor(llm, agent_config.prompt, history, agent_config.mcp_config)
+ else:
+ tools = ToolGenerator.generate_tools(agent_config.tools or [])
+ if tools:
+ processor = AgentProcessor(llm, agent_config.prompt, history, tools)
+ else:
+ processor = SimpleProcessor(llm, agent_config.prompt, history)
+ is_simple = True
+
+ try:
+ response_data = await processor.process(request, request.files, ai_provider.supports_interleaved_files())
+ except Exception as e:
+ if is_simple:
+ response_data = await self._fallback_with_anthropic(request, agent_config, history)
+ else:
+ raise e
+
+ if request.conversation_id:
+ ai_response_content = response_data.get("text")
+ if ai_response_content is None:
+ ai_response_content = str(response_data)
+
+ self._update_conversation_history(
+ conversation_id=request.conversation_id,
+ user_message_content=request.query,
+ ai_response_content=ai_response_content
+ )
+
+ return response_data
+
+ def _update_conversation_history(self, conversation_id: str, user_message_content: str, ai_response_content: str) -> None:
+ if not conversation_id:
+ return
- processor = (
- AgentProcessor(llm, agent_config.prompt, history, tools)
- if tools
- else SimpleProcessor(llm, agent_config.prompt, history)
+ self.history_store[conversation_id].append({"role": "user", "content": user_message_content})
+ self.history_store[conversation_id].append({"role": "assistant", "content": ai_response_content})
+
+ current_conv_history = self.history_store[conversation_id]
+ if len(current_conv_history) > self.max_history_length:
+ self.history_store[conversation_id] = current_conv_history[-self.max_history_length:]
+
+ async def _fallback_with_anthropic(self, request: MessageRequest, agent_config: AgentConfigResponse, history: list) -> dict[str, Any]:
+ anthropic_provider = AIProviderFactory.get_provider("claude")
+ anthropic_llm = anthropic_provider.get_llm(
+ model="claude-3-7-sonnet-20250219",
+ temperature=agent_config.preferences.temperature,
+ max_tokens=agent_config.preferences.max_tokens,
+ top_p=agent_config.preferences.top_p
)
- return await processor.process(request.query)
+ processor = SimpleProcessor(anthropic_llm, agent_config.prompt, history)
+
+ return await processor.process(request, request.files, anthropic_provider.supports_interleaved_files())
diff --git a/app/middlewares/auth_middleware.py b/app/middlewares/auth_middleware.py
new file mode 100644
index 0000000..a7bfc7b
--- /dev/null
+++ b/app/middlewares/auth_middleware.py
@@ -0,0 +1,80 @@
import secrets
from functools import wraps
from typing import Optional

import httpx
from fastapi import HTTPException, Header, Request

from app.configurations.config import AUTH_SERVICE_URL, API_KEY
+
+
async def verify_api_key(api_key: Optional[str]) -> bool:
    """Validate the shared-secret API key header.

    Args:
        api_key: Value of the ``x-api-key`` header, or None when absent.

    Returns:
        True when the key matches.

    Raises:
        HTTPException: 401 when the key is missing or does not match.
    """
    if not api_key:
        raise HTTPException(
            status_code=401,
            detail="API Key not provided"
        )

    # Constant-time comparison so timing cannot leak key prefixes.
    if not secrets.compare_digest(api_key, API_KEY):
        raise HTTPException(
            status_code=401,
            detail="Invalid API Key"
        )

    return True
+
+
def require_api_key(func):
    """Decorator for FastAPI endpoints: reject requests lacking a valid
    ``x-api-key`` header before invoking the wrapped handler.

    The wrapped endpoint must accept ``request: Request`` as its first argument.
    """
    @wraps(func)
    async def wrapper(request: Request, *args, **kwargs):
        # Defensive: the decorator was applied to a handler with no Request.
        if request is None:
            raise HTTPException(
                status_code=500,
                detail="Request not found"
            )
        await verify_api_key(request.headers.get("x-api-key"))
        return await func(request, *args, **kwargs)

    return wrapper
+
+
async def verify_user_token(authorization: Optional[str]) -> dict:
    """Validate a bearer token against the external auth service.

    Args:
        authorization: Raw ``Authorization`` header value, or None.

    Returns:
        The auth service's user-info payload (JSON-decoded dict).

    Raises:
        HTTPException: 401 when the token is missing or rejected;
            500 when the auth service cannot be reached.
    """
    if not authorization:
        raise HTTPException(
            status_code=401,
            detail="Authorization token not provided"
        )

    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(
                AUTH_SERVICE_URL,
                headers={"Authorization": authorization},
                timeout=3.0
            )

            if response.status_code != 200:
                raise HTTPException(
                    status_code=401,
                    detail="Invalid token"
                )

            return response.json()
    except httpx.RequestError as exc:
        # Chain the transport error so logs show the real cause.
        raise HTTPException(
            status_code=500,
            detail="Error verifying token"
        ) from exc
+ )
+
+
def require_auth(func):
    """Decorator: resolve the caller's identity before running the endpoint.

    The verified user payload is stored on ``request.state.user_info`` for
    the wrapped handler to use.
    """
    @wraps(func)
    async def wrapper(request: Request, *args, **kwargs):
        # Defensive: the decorator was applied to a handler with no Request.
        if request is None:
            raise HTTPException(status_code=500, detail="Request not found")

        request.state.user_info = await verify_user_token(request.headers.get("authorization"))
        return await func(request, *args, **kwargs)

    return wrapper
diff --git a/app/pdf/helpers.py b/app/pdf/helpers.py
new file mode 100644
index 0000000..eeffe3c
--- /dev/null
+++ b/app/pdf/helpers.py
@@ -0,0 +1,20 @@
def clean_text(text):
    """Replace common typographic Unicode punctuation with ASCII equivalents."""
    # Curly quotes, dashes, and the ellipsis mapped in one translation pass.
    ascii_map = str.maketrans({
        "\u2019": "'",
        "\u2018": "'",
        "\u201C": '"',
        "\u201D": '"',
        "\u2014": "-",
        "\u2013": "-",
        "\u2026": "...",
    })
    return text.translate(ascii_map)
+
+
def clean_json(text):
    """Strip Markdown code-fence wrappers (```json ... ```) from a JSON payload."""
    cleaned = text.strip()

    # Opening fence: prefer the language-tagged form, fall back to the plain one.
    for fence in ("```json", "```"):
        if cleaned.startswith(fence):
            cleaned = cleaned[len(fence):].strip()
            break

    # Closing fence.
    if cleaned.endswith("```"):
        cleaned = cleaned[:-3].strip()

    return cleaned
diff --git a/app/pdf/pdf_generator.py b/app/pdf/pdf_generator.py
new file mode 100644
index 0000000..ee005fe
--- /dev/null
+++ b/app/pdf/pdf_generator.py
@@ -0,0 +1,355 @@
+from fpdf import FPDF
+import requests
+import io
+import os
+from typing import Optional, Tuple
+try:
+ import PIL.Image as PILImage
+ PILLOW_AVAILABLE = True
+except ImportError:
+ PILImage = None
+ PILLOW_AVAILABLE = False
+
# Layout constants
class PDFConstants:
    """Central style values (colors, font sizes, margins) used by the PDF builder."""
    # Colors
    HEADER_COLOR = (0, 0, 0)  # Black for the header (title and rule)
    SECTION_BG_COLOR = (64, 64, 64)  # Softer dark gray behind section title bars
    SECTION_BORDER_COLOR = (255, 140, 0)  # Orange/gold border accent
    WHITE_COLOR = (255, 255, 255)
    BLACK_COLOR = (0, 0, 0)
    GRAY_COLOR = (128, 128, 128)
    LIGHT_GRAY_COLOR = (200, 200, 200)

    # Font sizes
    HEADER_FONT_SIZE = 16
    COVER_TITLE_FONT_SIZE = 28
    SECTION_TITLE_FONT_SIZE = 14
    CONTENT_FONT_SIZE = 12
    FOOTER_FONT_SIZE = 10

    # Margins and spacing
    PAGE_MARGIN = 15
    HEADER_MARGIN = 10
    OVERLAY_HEIGHT = 80
    LINE_WIDTH_THIN = 0.3
    LINE_WIDTH_MEDIUM = 0.5
    LINE_WIDTH_THICK = 0.7

    # Miscellaneous
    IMAGE_QUALITY = 85
    TEMP_IMAGE_PATH = "/tmp/temp_cover_image.jpg"
    REQUEST_TIMEOUT = 10
+
+
class PDFGenerator(FPDF):
    """FPDF subclass that renders a product manual: a cover page (image or
    text), repeated header/footer on content pages, and boxed section blocks.

    All rendered text is sanitized to latin-1 because the built-in Helvetica
    font cannot encode arbitrary Unicode.
    """

    def __init__(self, product_name: str):
        super().__init__()
        self.product_name = product_name
        self.custom_title: Optional[str] = None
        self.header_height = 0
        self.version = "1.0"
        self.first_section = True  # First section stays on the current page; later ones start a new page

    def header(self) -> None:
        """Render the running header on every page except the cover."""
        if self.page_no() == 1:
            return

        initial_y = self.get_y()

        self.set_font("Helvetica", "B", PDFConstants.HEADER_FONT_SIZE)
        self.set_text_color(*PDFConstants.HEADER_COLOR)

        # Fall back to a default title when no custom one was set.
        title = self.custom_title if self.custom_title else f"User Manual for {self.product_name}"
        clean_title = self._clean_text_for_latin1(title)

        self.set_y(PDFConstants.HEADER_MARGIN)
        width_available = self.w - (2 * PDFConstants.HEADER_MARGIN)
        self.x = PDFConstants.HEADER_MARGIN

        self.multi_cell(width_available, 8, clean_title, align="C")

        # Horizontal rule just below the title.
        end_y = self.get_y() + 2
        self.set_line_width(PDFConstants.LINE_WIDTH_MEDIUM)
        self.set_draw_color(*PDFConstants.HEADER_COLOR)
        self.line(PDFConstants.HEADER_MARGIN, end_y, self.w - PDFConstants.HEADER_MARGIN, end_y)

        self.set_y(end_y + PDFConstants.HEADER_MARGIN)
        self.header_height = self.get_y() - initial_y

    def footer(self) -> None:
        """Render the page-number footer on every page except the cover."""
        if self.page_no() == 1:
            return

        self.set_y(-20)
        self.set_font("Helvetica", "I", PDFConstants.FOOTER_FONT_SIZE)
        self.set_text_color(*PDFConstants.GRAY_COLOR)
        # Cover page is excluded from the numbering, hence the -1.
        self.cell(0, 10, f"Page {self.page_no()-1}", 0, 0, "C")

    def add_cover_page(self, title: str, subtitle: str = "", image_url: Optional[str] = None) -> None:
        """
        Create the PDF cover page.

        Args:
            title: Main cover title
            subtitle: Optional subtitle
            image_url: Optional image URL used as full-page cover art
        """
        self.add_page()

        page_width = self.w
        page_height = self.h

        if image_url and PILLOW_AVAILABLE:
            # Image-only cover: no text is drawn on top of the image.
            self._create_image_only_cover(image_url, page_width, page_height)
        else:
            # Traditional text cover when no image is available.
            title_y_pos, title_color = self._create_cover_background(None, page_width, page_height)
            self._add_cover_text(title, subtitle, title_y_pos, title_color, page_width, page_height, None)

        self.add_page()

    def _create_cover_background(self, image_url: Optional[str], page_width: float, page_height: float) -> Tuple[float, Tuple[int, int, int]]:
        """Draw the cover background (image plus overlay, or a plain border).

        Returns:
            Tuple (title_y_position, title_rgb_color) for the caption to follow.
        """
        if image_url and PILLOW_AVAILABLE:
            image_result = self._download_and_process_image(image_url)
            if image_result:
                temp_path, img_width, img_height = image_result

                available_width = page_width - 2 * PDFConstants.PAGE_MARGIN
                available_height = page_height - 2 * PDFConstants.PAGE_MARGIN

                x_pos, y_pos, final_width, final_height = self._calculate_image_dimensions(
                    img_width, img_height, available_width, available_height
                )

                self.image(temp_path, x=x_pos, y=y_pos, w=final_width, h=final_height)
                self._cleanup_temp_image()

                # Dark band near the bottom so the title stays readable.
                overlay_y = page_height - PDFConstants.OVERLAY_HEIGHT - PDFConstants.PAGE_MARGIN
                self.set_fill_color(*PDFConstants.BLACK_COLOR)
                self.rect(PDFConstants.PAGE_MARGIN, overlay_y,
                          page_width - 2 * PDFConstants.PAGE_MARGIN,
                          PDFConstants.OVERLAY_HEIGHT, 'F')

                return overlay_y + 15, PDFConstants.WHITE_COLOR

        # Traditional bordered cover.
        self.set_draw_color(*PDFConstants.HEADER_COLOR)
        self.set_line_width(PDFConstants.LINE_WIDTH_THICK)
        self.rect(PDFConstants.PAGE_MARGIN, PDFConstants.PAGE_MARGIN,
                  page_width - 2 * PDFConstants.PAGE_MARGIN,
                  page_height - 2 * PDFConstants.PAGE_MARGIN)

        return page_height * 0.4, PDFConstants.HEADER_COLOR

    def _add_cover_text(self, title: str, subtitle: str, title_y_pos: float,
                        title_color: Tuple[int, int, int], page_width: float,
                        page_height: float, image_url: Optional[str]) -> None:
        """Render the cover title and, for image-less covers, subtitle and version."""
        self.set_font("Helvetica", "B", PDFConstants.COVER_TITLE_FONT_SIZE)
        self.set_text_color(*title_color)

        text_width = page_width - 2 * PDFConstants.PAGE_MARGIN - 20

        self.set_y(title_y_pos)
        self.set_x(PDFConstants.PAGE_MARGIN + 10)
        clean_title = self._clean_text_for_latin1(title)
        self.multi_cell(text_width, 18, clean_title, align="C")

        # Subtitle and version only appear on the text-only cover.
        if not image_url:
            if subtitle:
                self.ln(15)
                self.set_font("Helvetica", "", 18)
                self.set_text_color(80, 80, 80)
                self.set_x(PDFConstants.PAGE_MARGIN + 10)
                clean_subtitle = self._clean_text_for_latin1(subtitle)
                self.multi_cell(text_width, 12, clean_subtitle, align="C")

            self.set_font("Helvetica", "I", 11)
            self.set_text_color(100, 100, 100)
            version_y = page_height - PDFConstants.PAGE_MARGIN - 20
            self.set_y(version_y)
            self.set_x(PDFConstants.PAGE_MARGIN + 10)
            self.multi_cell(text_width, 10, f"Document Version: {self.version}", align="C")

    def set_document_version(self, version: str) -> None:
        """Set the document version shown on the text-only cover."""
        self.version = version

    def set_custom_title(self, title: str) -> None:
        """Set the custom title shown in the header of every content page."""
        self.custom_title = title

    def _download_and_process_image(self, image_url: str) -> Optional[Tuple[str, int, int]]:
        """
        Download an image from a URL and re-encode it as a temporary JPEG.

        Returns:
            Tuple (temp_path, width, height), or None on any failure.
        """
        try:
            response = requests.get(image_url, timeout=PDFConstants.REQUEST_TIMEOUT)
            response.raise_for_status()

            image = PILImage.open(io.BytesIO(response.content))

            # JPEG requires RGB; flatten any other mode.
            if image.mode != 'RGB':
                image = image.convert('RGB')

            image.save(PDFConstants.TEMP_IMAGE_PATH, "JPEG", quality=PDFConstants.IMAGE_QUALITY)

            return PDFConstants.TEMP_IMAGE_PATH, image.width, image.height

        except Exception as e:
            print(f"Error al procesar imagen: {e}")
            return None

    def _calculate_image_dimensions(self, img_width: int, img_height: int,
                                    available_width: float, available_height: float) -> Tuple[float, float, float, float]:
        """
        Compute size and position to center an image while keeping its aspect ratio.

        Returns:
            Tuple (x_pos, y_pos, final_width, final_height)
        """
        # "Fit" scaling: the smaller factor guarantees both sides fit.
        scale_width = available_width / img_width
        scale_height = available_height / img_height
        scale = min(scale_width, scale_height)

        final_width = img_width * scale
        final_height = img_height * scale

        x_pos = (self.w - final_width) / 2
        y_pos = (self.h - final_height) / 2

        return x_pos, y_pos, final_width, final_height

    def _cleanup_temp_image(self) -> None:
        """Remove the temporary image file if it exists."""
        if os.path.exists(PDFConstants.TEMP_IMAGE_PATH):
            os.remove(PDFConstants.TEMP_IMAGE_PATH)

    def _create_image_only_cover(self, image_url: str, page_width: float, page_height: float) -> None:
        """Render a cover consisting solely of the image filling the whole page."""
        image_result = self._download_and_process_image(image_url)
        if image_result:
            temp_path, img_width, img_height = image_result

            # "Fill" scaling: may crop, unlike the centered fit used elsewhere.
            scale_width = page_width / img_width
            scale_height = page_height / img_height
            # The larger factor guarantees full coverage (crop to fit).
            scale = max(scale_width, scale_height)

            final_width = img_width * scale
            final_height = img_height * scale

            # Center the image; edges may extend beyond the page.
            x_pos = (page_width - final_width) / 2
            y_pos = (page_height - final_height) / 2

            self.image(temp_path, x=x_pos, y=y_pos, w=final_width, h=final_height)
            self._cleanup_temp_image()

    def get_multi_cell_height(self, w, h, txt, align="J"):
        """Estimate the height multi_cell would need for *txt* wrapped at width *w*.

        NOTE(review): word-wrap approximation only — embedded newlines and
        words wider than *w* are not handled; confirm against fpdf's own
        line-breaking if exact heights matter.
        """
        x = self.x
        y = self.y

        lines = 1
        width = 0
        text = txt.split(' ')
        for word in text:
            word_width = self.get_string_width(word + ' ')
            if width + word_width > w:
                lines += 1
                width = word_width
            else:
                width += word_width

        # Restore the cursor position.
        self.x = x
        self.y = y

        return lines * h

    def add_section(self, title: str, content: str) -> None:
        """
        Add a section with a highlighted title bar and body text.
        Every section after the first starts on a new page.

        Args:
            title: Section title
            content: Section content (string, or list of lines)
        """
        # Each section starts a new page (except the very first one).
        if not self.first_section:
            self.add_page()
        else:
            self.first_section = False

        # Title bar: white text on a dark-gray background (no orange border).
        self.set_font("Helvetica", "B", PDFConstants.SECTION_TITLE_FONT_SIZE)
        self.set_text_color(*PDFConstants.WHITE_COLOR)  # White text
        self.set_fill_color(*PDFConstants.SECTION_BG_COLOR)  # Gray background

        # Full-width filled title cell.
        clean_title = self._clean_text_for_latin1(title)
        self.cell(0, 12, clean_title, ln=True, fill=True, align="C", border=0)
        self.ln(6)

        # Section body.
        self.set_text_color(*PDFConstants.BLACK_COLOR)
        self.set_font("Helvetica", "", PDFConstants.CONTENT_FONT_SIZE)

        formatted_text = self._format_content(content)
        self.multi_cell(0, 8, formatted_text)

        # Separator line between sections.
        self.ln(8)
        self.set_draw_color(*PDFConstants.LIGHT_GRAY_COLOR)
        self.set_line_width(PDFConstants.LINE_WIDTH_THIN)
        current_y = self.get_y()
        self.line(PDFConstants.HEADER_MARGIN, current_y, self.w - PDFConstants.HEADER_MARGIN, current_y)
        self.ln(10)

    def _format_content(self, content) -> str:
        """Normalize section content (list or string) into a latin-1-safe string."""
        if isinstance(content, list):
            text = "\n".join(str(item) for item in content)
        else:
            # Literal backslash-n sequences in the source become real newlines.
            text = content.replace("\\n", "\n")

        # Strip characters the latin-1 core fonts cannot encode.
        return self._clean_text_for_latin1(text)

    def _clean_text_for_latin1(self, text: str) -> str:
        """Coerce *text* into latin-1-safe characters for the core PDF fonts."""
        # Common typographic characters mapped to safer equivalents.
        replacements = {
            '\u2022': '•',  # NOTE(review): '•' IS U+2022, so this entry maps the bullet
                            # to itself; bullets fall through to errors='replace' below.
            '\u2013': '-',  # En dash
            '\u2014': '-',  # Em dash
            '\u2018': "'",  # Left single quotation mark
            '\u2019': "'",  # Right single quotation mark
            '\u201c': '"',  # Left double quotation mark
            '\u201d': '"',  # Right double quotation mark
            '\u2026': '...',  # Horizontal ellipsis
            '\u00a0': ' ',  # Non-breaking space
        }

        # Apply replacements.
        for unicode_char, replacement in replacements.items():
            text = text.replace(unicode_char, replacement)

        # Round-trip through latin-1 to detect anything still unencodable.
        try:
            text.encode('latin-1')
            return text
        except UnicodeEncodeError:
            # Replace any remaining problem characters.
            return text.encode('latin-1', errors='replace').decode('latin-1')
diff --git a/app/pdf/pdf_manual_generator.py b/app/pdf/pdf_manual_generator.py
new file mode 100644
index 0000000..ccc4095
--- /dev/null
+++ b/app/pdf/pdf_manual_generator.py
@@ -0,0 +1,37 @@
+import base64
+import os
+from app.pdf.pdf_generator import PDFGenerator
+from app.configurations.pdf_manual_config import PDF_MANUAL_SECTION_ORDER, get_sections_for_language
+
+
class PDFManualGenerator:
    """Builds a product manual PDF from per-section content and returns it base64-encoded."""

    def __init__(self, product_name: str, language: str = "es"):
        self.product_name = product_name
        self.language = language
        # Section titles localized for the requested language.
        self.sections = get_sections_for_language(language)
        self.pdf = PDFGenerator(product_name)

    async def create_manual(self, data: dict, title: str = None, image_url: str = None) -> str:
        """Render the manual and return the PDF as a base64 string.

        Args:
            data: Mapping of section key -> content; missing keys render empty.
            title: Optional custom title for the cover and page headers.
            image_url: Optional cover image URL.
        """
        # Use the custom title when given, else the default one.
        cover_title = title if title else f"User Manual for {self.product_name}"

        # The custom title also becomes the running header on every page.
        if title:
            self.pdf.set_custom_title(title)

        self.pdf.add_cover_page(
            cover_title,
            "Everything You Need to Know to Get Started",
            image_url
        )
        self.pdf.set_auto_page_break(auto=True, margin=20)

        # Sections are emitted in the fixed configured order.
        for key in PDF_MANUAL_SECTION_ORDER:
            self.pdf.add_section(self.sections[key], data.get(key, ""))

        # NOTE(review): assumes PyFPDF-style output(dest="S") returning str;
        # fpdf2 returns a bytearray, which would make .encode() fail — confirm
        # which library version is installed.
        pdf_str = self.pdf.output(dest="S")
        pdf_bytes = pdf_str.encode("latin1")

        base64_str = base64.b64encode(pdf_bytes).decode("utf-8")

        return base64_str
\ No newline at end of file
diff --git a/app/processors/agent_processor.py b/app/processors/agent_processor.py
index 217f065..687c321 100644
--- a/app/processors/agent_processor.py
+++ b/app/processors/agent_processor.py
@@ -1,17 +1,20 @@
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from app.processors.conversation_processor import ConversationProcessor
from langchain_core.language_models import BaseChatModel
import traceback
+from app.requests.message_request import MessageRequest
+
class AgentProcessor(ConversationProcessor):
def __init__(self, llm: BaseChatModel, context: str, history: List[str], tools: List[Any]):
super().__init__(llm, context, history)
self.tools = tools
- async def process(self, query: str) -> Dict[str, Any]:
+ async def process(self, request: MessageRequest, files: Optional[List[Dict[str, str]]] = None,
+ supports_interleaved_files: bool = False) -> Dict[str, Any]:
prompt_template = ChatPromptTemplate.from_messages([
("system", "{context}"),
MessagesPlaceholder(variable_name="chat_history"),
@@ -35,12 +38,22 @@ async def process(self, query: str) -> Dict[str, Any]:
)
try:
+ config = self._get_langsmith_config(
+ request,
+ "agent_processor",
+ has_tools=len(self.tools) > 0
+ )
+
result = await agent_executor.ainvoke({
"context": self.context or "",
"chat_history": self.history,
- "input": query,
+ "input": request.query,
"agent_scratchpad": ""
- })
+ }, config=config)
+
+ if "text" not in result and "output" in result:
+ result["text"] = result["output"]
+
return result
except Exception as e:
print(f"Error durante la ejecución del agente: {str(e)}")
diff --git a/app/processors/conversation_processor.py b/app/processors/conversation_processor.py
index 1ad75eb..6df29c5 100644
--- a/app/processors/conversation_processor.py
+++ b/app/processors/conversation_processor.py
@@ -1,4 +1,4 @@
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
from langchain_core.language_models import BaseChatModel
@@ -8,5 +8,16 @@ def __init__(self, llm: BaseChatModel, context: str, history: List[str]):
self.context = context
self.history = history
- async def process(self, query: str) -> Dict[str, Any]:
- raise NotImplementedError
\ No newline at end of file
+ def _get_langsmith_config(self, request, processor_type: str, **extra_metadata) -> Dict[str, Any]:
+ config = {
+ "tags": [processor_type, f"agent_{request.agent_id}"],
+ "metadata": {
+ "agent_id": request.agent_id,
+ "conversation_id": request.conversation_id,
+ **extra_metadata
+ }
+ }
+ return config
+
+ async def process(self, query: str, files: Optional[List[Dict[str, str]]], supports_interleaved_files: bool) -> Dict[str, Any]:
+ raise NotImplementedError
diff --git a/app/processors/mcp_processor.py b/app/processors/mcp_processor.py
new file mode 100644
index 0000000..d4343f5
--- /dev/null
+++ b/app/processors/mcp_processor.py
@@ -0,0 +1,91 @@
+from typing import Dict, Any, List, Optional
+from app.processors.conversation_processor import ConversationProcessor
+from app.requests.message_request import MessageRequest
+from langchain_core.language_models import BaseChatModel
+from langchain_mcp_adapters.client import MultiServerMCPClient
+from langgraph.prebuilt import create_react_agent
+import json
+import re
+
+
class MCPProcessor(ConversationProcessor):
    """Processor that answers queries with a LangGraph ReAct agent whose tools
    come from one or more MCP (Model Context Protocol) servers."""

    def __init__(self, llm: BaseChatModel, context: str, history: List[str], mcp_config: Dict[str, Any]):
        # mcp_config: server-name -> connection settings, passed straight to
        # MultiServerMCPClient.
        super().__init__(llm, context, history)
        self.mcp_config = mcp_config

    async def process(self, request: MessageRequest, files: Optional[List[Dict[str, str]]] = None,
                      supports_interleaved_files: bool = False) -> Dict[str, Any]:
        """Run request.query through a ReAct agent wired to the MCP tools.

        files / supports_interleaved_files are accepted for interface parity
        with the other processors but are not used in this method.

        Returns a dict with context, chat_history, input, text (final answer,
        unwrapped from a ```json fence if present) and tool_result (last tool
        call's name and parsed output, or None).
        """
        async with MultiServerMCPClient(self.mcp_config) as client:
            agent = create_react_agent(self.llm, client.get_tools())

            system_message = self.context or ""
            if request.json_parser:
                # Force a strict JSON reply matching the requested structure.
                format_instructions = json.dumps(request.json_parser, indent=2)
                system_message += (
                    "\n\nIMPORTANT: Respond exclusively in JSON format following exactly this structure:\n\n"
                    f"{format_instructions}\n\n"
                    "Do NOT include markdown, explanations, or anything else besides the JSON."
                )

            messages = []
            if system_message:
                messages.append({"role": "system", "content": system_message})

            if self.history:
                messages.extend(self.history)

            messages.append({"role": "user", "content": request.query})

            config = self._get_langsmith_config(
                request,
                "mcp_processor",
                mcp_servers=list(self.mcp_config.keys()) if isinstance(self.mcp_config, dict) else []
            )

            response = await agent.ainvoke({"messages": messages}, config=config)

            # The agent returns the full message list; the answer is the last
            # entry, which may be an object with .content or a plain dict.
            content = ""
            if "messages" in response and response["messages"]:
                last_message = response["messages"][-1]
                if hasattr(last_message, "content"):
                    content = last_message.content
                elif isinstance(last_message, dict) and "content" in last_message:
                    content = last_message["content"]
                else:
                    content = str(last_message)
            else:
                content = str(response)

            # Unwrap a ```json fenced block if the model produced one.
            match = re.search(r'```json\n(.*?)\n```', content, re.DOTALL)
            result = match.group(1) if match else content

            tool_info = await self.get_tool_data(response)

            return {
                "context": self.context,
                "chat_history": self.history,
                "input": request.query,
                "text": result,
                "tool_result": tool_info
            }

    async def get_tool_data(self, response):
        """Return {"name", "message"} for the last tool message in the agent
        response, parsing its content as JSON when possible; None if no tool ran."""
        tool_messages = [
            msg for msg in response.get('messages', [])
            if getattr(msg, 'type', None) == 'tool'
        ]
        tool_info = None
        if tool_messages:
            last_tool = tool_messages[-1]
            name = last_tool.name
            tool_result = last_tool.content
            try:
                tool_result_json = json.loads(tool_result)
            except json.JSONDecodeError:
                # Not JSON — keep the raw content.
                # NOTE(review): json.loads raises TypeError (uncaught here) if
                # the tool content is not str/bytes — confirm content type.
                tool_result_json = tool_result

            tool_info = {
                "name": name,
                "message": tool_result_json
            }
        return tool_info
diff --git a/app/processors/simple_processor.py b/app/processors/simple_processor.py
index 6e39123..117a032 100644
--- a/app/processors/simple_processor.py
+++ b/app/processors/simple_processor.py
@@ -1,25 +1,87 @@
-from typing import Dict, Any
-from langchain.chains import LLMChain
+import json
+from typing import Dict, Any, Optional, List
+from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+
from app.processors.conversation_processor import ConversationProcessor
+from app.requests.message_request import MessageRequest
+import re
class SimpleProcessor(ConversationProcessor):
    """Processor that answers a query with a single LLM call (no tools).

    Supports optional file attachments (referenced as pseudo-XML tags in the
    system message) and an optional strict-JSON output contract.
    """

    @staticmethod
    def _format_file_references(files: List[Dict[str, str]]) -> str:
        """Render attached files as `<image .../>`-style tag lines for the prompt.

        Fix: the original emitted a malformed closing tag (`>{tag}>` instead of
        `</{tag}>`), producing broken markup like `<image url='...'>image>`.
        """
        references = []
        for file in files:
            tag = 'image' if file.get('type') == 'image' else 'file'
            references.append(f"<{tag} url='{file['url']}'></{tag}>")
        return "\n".join(references)

    async def generate_response(self, context: str, chat_history: list, query: str, prompt: ChatPromptTemplate,
                                config: dict = None) -> Dict[str, Any]:
        """Invoke the LCEL chain and normalize the reply.

        Returns a dict echoing context/chat_history/input plus "text", the model
        reply with any ```json fence unwrapped.
        """
        chain = (
            {
                "context": lambda x: x["context"],
                "chat_history": lambda x: x["chat_history"],
                "input": lambda x: x["input"],
            }
            | prompt
            | self.llm
        )
        raw_response = await chain.ainvoke({
            "context": context,
            "chat_history": chat_history,
            "input": query
        }, config=config)

        content = raw_response.content

        # If the model wrapped its answer in a ```json fence, unwrap it.
        match = re.search(r'```json\n(.*?)\n```', content, re.DOTALL)
        response_content = match.group(1) if match else content

        return {
            "context": context,
            "chat_history": chat_history,
            "input": query,
            "text": response_content
        }

    async def process(self, request: MessageRequest, files: Optional[List[Dict[str, str]]] = None,
                      supports_interleaved_files: bool = False) -> Dict[str, Any]:
        """Build the prompt (context + optional file refs + optional JSON
        contract) and generate the response.

        File references go before the JSON instructions for providers without
        interleaved-file support, and after them otherwise (original ordering
        preserved; both branches now share _format_file_references).
        """
        messages = []
        system_message = self.context or ""

        if files and not supports_interleaved_files:
            system_message += "\n\n" + self._format_file_references(files)

        if request.json_parser:
            format_instructions = json.dumps(request.json_parser, indent=2)
            system_message += (
                "\n\nIMPORTANT: Respond exclusively in JSON format following exactly this structure:\n\n"
                f"{format_instructions}\n\n"
                "Do NOT include markdown, explanations, or anything else besides the JSON."
            )

        if files and supports_interleaved_files:
            system_message += "\n\n" + self._format_file_references(files)

        messages.append(SystemMessage(content=system_message))
        messages.append(MessagesPlaceholder(variable_name="chat_history"))
        messages.append(HumanMessage(content=request.query))

        prompt = ChatPromptTemplate.from_messages(messages)

        config = self._get_langsmith_config(
            request,
            "simple_processor",
            has_json_parser=request.json_parser is not None,
            has_files=files is not None and len(files) > 0
        )

        return await self.generate_response(self.context, self.history, request.query, prompt, config)
diff --git a/app/providers/ai_provider_interface.py b/app/providers/ai_provider_interface.py
index b3028f8..f61d99c 100644
--- a/app/providers/ai_provider_interface.py
+++ b/app/providers/ai_provider_interface.py
@@ -15,3 +15,7 @@ def get_llm(self, model: str, temperature: float, max_tokens: int, top_p: float)
Retorna el modelo de lenguaje configurado
"""
pass
+
    @abstractmethod
    def supports_interleaved_files(self) -> bool:
        """Whether this provider's models accept file references interleaved in
        the prompt; processors use it to decide where to inject file tags."""
        pass
diff --git a/app/providers/anthropic_provider.py b/app/providers/anthropic_provider.py
index a3a11d2..b91a7f9 100644
--- a/app/providers/anthropic_provider.py
+++ b/app/providers/anthropic_provider.py
@@ -6,7 +6,10 @@ class AnthropicProvider(AIProviderInterface):
def get_llm(self, model: str, temperature: float, max_tokens: int, top_p: int) -> ChatAnthropic:
return ChatAnthropic(
model=model,
- #temperature=temperature,
- #max_tokens=max_tokens,
- #top_p=top_p
+ temperature=temperature,
+ max_tokens=max_tokens,
+ top_p=top_p
)
+
    def supports_interleaved_files(self) -> bool:
        # True: processors may interleave file/image tags in the prompt for
        # Anthropic models (flag consumed by SimpleProcessor).
        return True
\ No newline at end of file
diff --git a/app/providers/deepseek_provider.py b/app/providers/deepseek_provider.py
index 8d100f2..19cde42 100644
--- a/app/providers/deepseek_provider.py
+++ b/app/providers/deepseek_provider.py
@@ -14,5 +14,8 @@ def get_llm(self, model: str, temperature: float, max_tokens: int, top_p: float)
return Ollama(
model=model,
base_url=DEEP_SEEK_HOST
- ##**model_kwargs
+ ** model_kwargs
)
+
    def supports_interleaved_files(self) -> bool:
        # False: processors put file references in the system message instead
        # of interleaving them (flag consumed by SimpleProcessor).
        return False
diff --git a/app/providers/gemini_provider.py b/app/providers/gemini_provider.py
new file mode 100644
index 0000000..1adaf7b
--- /dev/null
+++ b/app/providers/gemini_provider.py
@@ -0,0 +1,18 @@
+import os
+
+from langchain_google_genai import ChatGoogleGenerativeAI
+from app.providers.ai_provider_interface import AIProviderInterface
+
+
class GeminiProvider(AIProviderInterface):
    """Provider wrapper for Google Gemini chat models."""

    def get_llm(self, model: str, temperature: float, max_tokens: int, top_p: float) -> ChatGoogleGenerativeAI:
        """Create a configured Gemini chat client.

        The API key is read from the GOOGLE_GEMINI_API_KEY environment variable.
        Note: top_p was annotated `int`; changed to `float` for consistency with
        AIProviderInterface.get_llm.
        """
        return ChatGoogleGenerativeAI(
            model=model,
            temperature=temperature,
            max_output_tokens=max_tokens,
            top_p=top_p,
            google_api_key=os.getenv("GOOGLE_GEMINI_API_KEY")
        )

    def supports_interleaved_files(self) -> bool:
        # True: processors may interleave file/image tags in the prompt
        # (flag consumed by SimpleProcessor).
        return True
diff --git a/app/providers/openai_provider.py b/app/providers/openai_provider.py
index 1e2e2f5..7dd23a4 100644
--- a/app/providers/openai_provider.py
+++ b/app/providers/openai_provider.py
@@ -12,5 +12,8 @@ def get_llm(self, model: str, temperature: float, max_tokens: int, top_p: float)
        # model_kwargs is assembled earlier in this method (not visible in this
        # hunk); presumably carries temperature/max_tokens/top_p — confirm upstream.
        return ChatOpenAI(
            model=model,
            **model_kwargs
        )
+
    def supports_interleaved_files(self) -> bool:
        # True: processors may interleave file/image tags in the prompt for
        # OpenAI models (flag consumed by SimpleProcessor).
        return True
diff --git a/app/requests/brand_context_resolver_request.py b/app/requests/brand_context_resolver_request.py
new file mode 100644
index 0000000..b4e37ee
--- /dev/null
+++ b/app/requests/brand_context_resolver_request.py
@@ -0,0 +1,11 @@
+from pydantic import BaseModel
+from typing import List
+
+
class BrandContextResolverRequest(BaseModel):
    """Request carrying scraped website info used to resolve a brand context."""

    # Arbitrary entries describing scraped websites; stringified for the prompt.
    websites_info: List

    @property
    def prompt(self) -> dict:
        """Return the prompt variables: all website entries joined into one string."""
        joined = ", ".join(map(str, self.websites_info))
        return {"websites_info": joined}
diff --git a/app/requests/copy_request.py b/app/requests/copy_request.py
new file mode 100644
index 0000000..fa0e0b5
--- /dev/null
+++ b/app/requests/copy_request.py
@@ -0,0 +1,5 @@
+from pydantic import BaseModel, Field, validator
+
+
class CopyRequest(BaseModel):
    """Request body carrying a free-form prompt for copy generation."""

    prompt: str
diff --git a/app/requests/direct_scrape_request.py b/app/requests/direct_scrape_request.py
new file mode 100644
index 0000000..1d3f2bd
--- /dev/null
+++ b/app/requests/direct_scrape_request.py
@@ -0,0 +1,5 @@
+from pydantic import BaseModel, Field, validator
+
+
class DirectScrapeRequest(BaseModel):
    """Request carrying raw HTML to be scraped directly (no page fetching)."""

    html: str
diff --git a/app/requests/generate_audio_request.py b/app/requests/generate_audio_request.py
new file mode 100644
index 0000000..0621741
--- /dev/null
+++ b/app/requests/generate_audio_request.py
@@ -0,0 +1,7 @@
+from pydantic import BaseModel
+from typing import Optional, Dict, Any
+
+
class GenerateAudioRequest(BaseModel):
    """Request to synthesize audio from text."""

    # Text to convert to audio.
    text: str
    # Optional extra payload; structure not fixed here — confirm against caller.
    content: Optional[Dict[str, Any]] = None
\ No newline at end of file
diff --git a/app/requests/generate_image_request.py b/app/requests/generate_image_request.py
new file mode 100644
index 0000000..f85a855
--- /dev/null
+++ b/app/requests/generate_image_request.py
@@ -0,0 +1,17 @@
+from pydantic import BaseModel
+from typing import Optional, Dict, Any
+
+
class GenerateImageRequest(BaseModel):
    """Request to generate images from a prompt and/or source images."""

    # Source image content — presumably base64-encoded; confirm against caller.
    file: Optional[str] = None
    # Single source image URL.
    file_url: Optional[str] = None
    # Multiple source image URLs.
    file_urls: Optional[list[str]] = None
    # Requesting user/tenant id.
    owner_id: Optional[str] = None
    # Free-form generation prompt.
    prompt: Optional[str] = None
    agent_id: Optional[str] = None
    # Provider / model selection overrides.
    provider: Optional[str] = None
    model_ai: Optional[str] = None
    # Number of images to produce.
    num_variations: int = 4
    # Values interpolated into the prompt template.
    parameter_prompt: Optional[Dict[str, Any]] = None
    extra_parameters: Optional[Dict[str, Any]] = None
    # Output language, defaults to Spanish.
    language: Optional[str] = "es"
\ No newline at end of file
diff --git a/app/requests/generate_pdf_request.py b/app/requests/generate_pdf_request.py
new file mode 100644
index 0000000..53a7a74
--- /dev/null
+++ b/app/requests/generate_pdf_request.py
@@ -0,0 +1,12 @@
+from pydantic import BaseModel
+
+
class GeneratePdfRequest(BaseModel):
    """Request to generate a product PDF document."""

    product_id: str
    product_name: str
    product_description: str
    language: str
    owner_id: str
    # Cover image for the document.
    image_url: str
    # Custom document title.
    title: str
    content: str
diff --git a/app/requests/generate_video_request.py b/app/requests/generate_video_request.py
new file mode 100644
index 0000000..ce7dbc3
--- /dev/null
+++ b/app/requests/generate_video_request.py
@@ -0,0 +1,11 @@
+from enum import Enum
+from typing import Dict, Any, Optional
+from pydantic import BaseModel
+
class VideoType(str, Enum):
    """Supported video generation types."""

    human_scene = "human_scene"
    animated_scene = "animated_scene"
+
class GenerateVideoRequest(BaseModel):
    """Request to generate a video of the given type."""

    type: VideoType
    # Optional extra payload; structure not fixed here — confirm against caller.
    content: Optional[Dict[str, Any]] = None
diff --git a/app/requests/message_request.py b/app/requests/message_request.py
index 370d434..1e23bfa 100644
--- a/app/requests/message_request.py
+++ b/app/requests/message_request.py
@@ -1,10 +1,18 @@
-from pydantic import BaseModel
-from typing import Optional, List, Dict
+from typing import List, Dict, Any, Optional
+from pydantic import BaseModel, Field
+
+
class MetadataFilter(BaseModel):
    """A single metadata filter condition (key <evaluator> value)."""

    key: str
    value: str
    # Comparison operator; defaults to equality.
    evaluator: str = "="
class MessageRequest(BaseModel):
    """Incoming chat message payload for agent conversation endpoints."""

    # Agent that should handle the message (now required).
    agent_id: str
    # Conversation the message belongs to.
    conversation_id: str
    # User query text.
    query: str
    # Structured filters (key/value/evaluator); defaults to an empty list.
    metadata_filter: Optional[List[MetadataFilter]] = Field(default_factory=list)
    # Values interpolated into the agent's prompt template.
    parameter_prompt: Optional[Dict[str, Any]] = Field(default_factory=dict)
    # Attached files as dicts — presumably {"url", "type"} as read by the
    # processors; confirm against callers.
    files: Optional[List[Dict[str, str]]] = Field(default_factory=list)
    # Optional JSON structure the response must follow (strict-JSON mode).
    json_parser: Optional[Dict[str, Any]] = None
diff --git a/app/requests/product_scraping_request.py b/app/requests/product_scraping_request.py
new file mode 100644
index 0000000..7ef4bab
--- /dev/null
+++ b/app/requests/product_scraping_request.py
@@ -0,0 +1,7 @@
+from pydantic import BaseModel, HttpUrl
+from typing import Optional
+
+
class ProductScrapingRequest(BaseModel):
    """Request to scrape a product from a supported store URL."""

    # Validated as a URL by pydantic.
    product_url: HttpUrl
    # Country code; defaults to Colombia ("co").
    country: Optional[str] = "co"
diff --git a/app/requests/recommend_product_request.py b/app/requests/recommend_product_request.py
index 5b3ec3d..9712013 100644
--- a/app/requests/recommend_product_request.py
+++ b/app/requests/recommend_product_request.py
@@ -4,4 +4,5 @@
class RecommendProductRequest(BaseModel):
    """Request to recommend products based on a reference product."""

    product_name: str
    product_description: str
    # When True, presumably requests similar products (a separate agent id was
    # added in the env config). NOTE(review): `Optional` must be imported in
    # this module's header — not visible in this hunk; confirm.
    similar: Optional[bool] = False
\ No newline at end of file
diff --git a/app/requests/resolve_funnel_request.py b/app/requests/resolve_funnel_request.py
new file mode 100644
index 0000000..40a9d24
--- /dev/null
+++ b/app/requests/resolve_funnel_request.py
@@ -0,0 +1,8 @@
+from pydantic import BaseModel
+from typing import Optional
+
+
class ResolveFunnelRequest(BaseModel):
    """Request to resolve a funnel for a product."""

    product_name: str
    product_description: str
    # Output language, defaults to Spanish.
    language: Optional[str] = "es"
\ No newline at end of file
diff --git a/app/requests/variation_image_request.py b/app/requests/variation_image_request.py
new file mode 100644
index 0000000..a89edc2
--- /dev/null
+++ b/app/requests/variation_image_request.py
@@ -0,0 +1,14 @@
+from pydantic import BaseModel, Field, validator
+from typing import Optional
+
+
class VariationImageRequest(BaseModel):
    """Request to generate image variations from a source image."""

    # Source image content — presumably base64-encoded; confirm against caller.
    file: str
    # Bounded to 1..10 by the Field constraints.
    num_variations: int = Field(default=3, ge=1, le=10)
    # Output language, defaults to Spanish.
    language: Optional[str] = "es"

    @validator('num_variations')
    def validate_variations(cls, v):
        # NOTE(review): Field(le=10) already rejects v > 10 before this runs,
        # so this check appears redundant — presumably kept for the custom
        # Spanish error message; confirm.
        if v > 10:
            raise ValueError("El número máximo de variaciones permitidas es 10")
        return v
diff --git a/app/responses/generate_image_response.py b/app/responses/generate_image_response.py
new file mode 100644
index 0000000..4a9a644
--- /dev/null
+++ b/app/responses/generate_image_response.py
@@ -0,0 +1,13 @@
+from typing import List, Optional
+
+from pydantic import BaseModel
+
+from app.externals.google_vision.responses.vision_analysis_response import VisionAnalysisResponse
+
+
class GenerateImageResponse(BaseModel):
    """Result of an image-generation run."""

    # NOTE(review): in pydantic v2, Optional without an explicit default is
    # required-but-nullable; add `= None` if these should be omittable —
    # confirm pydantic version.
    original_url: Optional[str]
    original_urls: Optional[list[str]]
    # URLs of the generated images.
    generated_urls: List[str]
    # Final prompt used for generation.
    generated_prompt: str
    # Optional Google Vision analysis of the source image.
    vision_analysis: Optional[VisionAnalysisResponse] = None
diff --git a/app/scrapers/aliexpress_scraper.py b/app/scrapers/aliexpress_scraper.py
new file mode 100644
index 0000000..b23b78f
--- /dev/null
+++ b/app/scrapers/aliexpress_scraper.py
@@ -0,0 +1,273 @@
+from app.scrapers.scraper_interface import ScraperInterface
+from typing import Dict, Any, List, Optional, Tuple
+from app.externals.aliexpress.aliexpress_client import get_item_detail
+import re
+from fastapi import HTTPException
+from decimal import Decimal, InvalidOperation
+from typing import Dict, Any
+
class AliexpressScraper(ScraperInterface):
    """Scraper for AliExpress products backed by the datahub item-detail API."""

    async def scrape_direct(self, html: str) -> Dict[str, Any]:
        """Direct-HTML scraping is not supported for AliExpress; returns {}."""
        return {}

    async def scrape(self, url: str, domain: str = None) -> Dict[str, Any]:
        """Fetch a product by URL and normalize it to the common scraper shape.

        Args:
            url: Product page URL containing the item id.
            domain: Unused; kept for interface compatibility.

        Returns:
            {"data": {...}} with provider_id, external_id, name, description,
            external_sell_price and images.

        Raises:
            HTTPException: 400 when the id cannot be extracted or the API
                response cannot be processed.
        """
        item_id = self._extract_item_id(url)
        product_details = await get_item_detail(item_id)

        try:
            item_data = self._get_item_data(product_details)

            result = {
                "name": self._get_name(item_data),
                "description": self._get_description(item_data),
                "external_sell_price": self._get_price(item_data),
                "images": self._get_images(item_data)
            }

            # Variant extraction (_extract_variants) is implemented but
            # intentionally not included in the response yet.

            response = {
                "provider_id": "aliexpress",
                "external_id": item_id,
                **result
            }

            return {"data": response}

        except Exception as e:
            # Boundary handler: surface any parsing problem as a 400.
            raise HTTPException(
                status_code=400,
                detail=f"Error procesando datos del producto: {str(e)}"
            )

    def _extract_item_id(self, url: str) -> str:
        """Extract the numeric item id from an AliExpress URL (path or query)."""
        pattern = r'item/(\d+)\.html'
        match = re.search(pattern, url)
        if match:
            return match.group(1)

        pattern = r'itemId=(\d+)'
        match = re.search(pattern, url)
        if match:
            return match.group(1)

        raise HTTPException(status_code=400, detail=f"No se pudo extraer el ID del producto de la URL: {url}")

    def _get_item_data(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """Return the payload under result.item, or raise if missing/empty."""
        result = response.get("result", {})
        item_data = result.get("item", {})
        if not item_data:
            raise ValueError("No se encontraron datos del producto en la respuesta")
        return item_data

    def _get_name(self, item_data: Dict[str, Any]) -> str:
        """Product title, or empty string when absent."""
        return item_data.get("title", "")

    def _get_description(self, item_data: Dict[str, Any]) -> str:
        """Plain-text description from the HTML description, with the
        name/value property list as a fallback."""
        description = ""
        description_data = item_data.get("description", {})
        if description_data:
            html_content = description_data.get("html", "")
            if html_content:
                # Basic tag stripping — could be improved with an HTML parser.
                description = re.sub(r'<[^>]+>', ' ', html_content)
                description = re.sub(r'\s+', ' ', description).strip()

        # Fallback: build a description from the product properties.
        if not description and "properties" in item_data:
            properties = item_data.get("properties", {}).get("list", [])
            if properties:
                description = "\n".join([f"{prop.get('name')}: {prop.get('value')}" for prop in properties])

        return description

    def _get_price(self, item_data: Dict[str, Any]) -> Optional[Decimal]:
        """Best-effort price: promotion price first, then list price, falling
        back to the first SKU variant; None when nothing parseable is found."""
        sku_data = item_data.get("sku", {})
        if not sku_data:
            return None

        def_data = sku_data.get("def", {})
        if def_data:
            promotion_price = def_data.get("promotionPrice")
            if promotion_price:
                return self._parse_price(promotion_price)

            price = def_data.get("price")
            if price:
                # For a price range like "3.55 - 3.87", take the lowest value.
                if isinstance(price, str) and " - " in price:
                    price = price.split(" - ")[0]
                return self._parse_price(price)

        # No price under "def": try the first SKU variant.
        base_variants = sku_data.get("base", [])
        if base_variants and len(base_variants) > 0:
            first_variant = base_variants[0]
            promotion_price = first_variant.get("promotionPrice")
            if promotion_price:
                return self._parse_price(promotion_price)

            price = first_variant.get("price")
            if price:
                return self._parse_price(price)

        return None

    def _parse_price(self, price_str: Any) -> Optional[Decimal]:
        """Parse a numeric or string price into Decimal; None when unparseable."""
        if isinstance(price_str, (int, float)):
            return Decimal(str(price_str))

        if isinstance(price_str, str):
            match = re.search(r'(\d+(?:\.\d+)?)', price_str.replace(",", ""))
            if match:
                try:
                    return Decimal(match.group(1))
                except InvalidOperation:
                    return None
        return None

    def _get_images(self, item_data: Dict[str, Any]) -> List[str]:
        """Absolute image URLs: main images first, description images as fallback."""
        images = []

        main_images = item_data.get("images", [])
        if main_images:
            images = [self._ensure_absolute_url(img) for img in main_images]

        if not images and "description" in item_data:
            desc_images = item_data.get("description", {}).get("images", [])
            if desc_images:
                images = [self._ensure_absolute_url(img) for img in desc_images]

        return images

    def _ensure_absolute_url(self, url: str) -> str:
        """Make a protocol-relative URL ("//...") absolute by adding https."""
        if url.startswith("//"):
            return f"https:{url}"
        return url

    def _extract_variants(self, item_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Build normalized variant entries from the SKU base/props data.

        Currently unused by scrape(); kept for when variants are enabled.
        """
        variants = []
        sku_data = item_data.get("sku", {})

        if not sku_data or "base" not in sku_data or "props" not in sku_data:
            return []

        base_variants = sku_data.get("base", [])
        props = sku_data.get("props", [])
        product_title = item_data.get("title", "")

        prop_map = self._create_property_map(props)

        for variant in base_variants:
            sku_id = variant.get("skuId")
            sku_attr = variant.get("skuAttr", "")

            attributes, variant_images = self._process_variant_attributes(sku_attr, prop_map)

            # Fall back to the first main image when the variant has none.
            if not variant_images:
                main_images = self._get_images(item_data)
                if main_images:
                    variant_images = [main_images[0]]

            variant_key = "-".join([attr["value"] for attr in attributes])

            variant_info = {
                "provider_id": "aliexpress",
                "external_id": sku_id,
                "name": product_title,
                "images": variant_images,
                "variant_key": variant_key,
                "attributes": attributes
            }

            variants.append(variant_info)

        return variants

    def _create_property_map(self, props: List[Dict[str, Any]]) -> Dict[int, Dict[str, Any]]:
        """Map pid -> {name, values: {vid -> {name, image}}} for attribute lookup."""
        prop_map = {}
        for prop in props:
            prop_id = prop.get("pid")
            prop_name = prop.get("name")
            values = {}
            for val in prop.get("values", []):
                values[val.get("vid")] = {
                    "name": val.get("name"),
                    "image": val.get("image", "")
                }
            prop_map[prop_id] = {
                "name": prop_name,
                "values": values
            }
        return prop_map

    def _process_variant_attributes(self, sku_attr: str, prop_map: Dict[int, Dict[str, Any]]) -> Tuple[
        List[Dict[str, Any]], List[str]]:
        """Resolve a variant's skuAttr string ("pid:vid;pid:vid") against the
        property map, returning its attributes and any variant-specific images."""
        attributes = []
        variant_images = []

        # Shipping-origin pseudo-attributes are not real product options.
        ignored_attributes = ["Ships From", "ship from"]

        if not sku_attr:
            return attributes, variant_images

        attr_parts = sku_attr.split(";")
        for part in attr_parts:
            if ":" not in part:
                continue

            pid_vid = part.split(":")
            if len(pid_vid) != 2:
                continue

            try:
                pid = int(pid_vid[0])
                vid_raw = pid_vid[1]

                # vid may come as "vid#name"; keep only the id part.
                vid = vid_raw
                if "#" in vid_raw:
                    vid = vid_raw.split("#")[0]

                try:
                    vid = int(vid)
                except ValueError:
                    # Fix: was a bare except; non-numeric vids are matched as strings.
                    pass

                if pid in prop_map and vid in prop_map[pid]["values"]:
                    prop_info = prop_map[pid]
                    value_info = prop_info["values"][vid]

                    if prop_info["name"] not in ignored_attributes:
                        attributes.append({
                            "category_name": prop_info["name"],
                            "value": value_info["name"]
                        })

                    if value_info["image"]:
                        variant_images.append(self._ensure_absolute_url(value_info["image"]))
            except Exception:
                # Fix: was a bare except (also swallowed SystemExit/KeyboardInterrupt);
                # skip malformed attribute entries instead of failing the variant.
                continue

        return attributes, variant_images
diff --git a/app/scrapers/amazon_scraper.py b/app/scrapers/amazon_scraper.py
new file mode 100644
index 0000000..d2235b0
--- /dev/null
+++ b/app/scrapers/amazon_scraper.py
@@ -0,0 +1,169 @@
+from fastapi import HTTPException
+
+from app.scrapers.helper_price import parse_price
+from app.scrapers.scraper_interface import ScraperInterface
+from typing import Dict, Any, List, Optional
+import re
+from app.externals.amazon.amazon_client import get_product_details
+from decimal import Decimal
+from typing import Dict, Any
+
+
class AmazonScraper(ScraperInterface):
    """Scraper for Amazon products backed by an external product-details API."""

    async def scrape_direct(self, html: str) -> Dict[str, Any]:
        """Direct-HTML scraping is not supported for Amazon; returns {}."""
        return {}

    async def scrape(self, url: str, domain: str = None) -> Dict[str, Any]:
        """Fetch a product by its ASIN (extracted from the URL) and normalize it.

        Args:
            url: Amazon product URL containing the ASIN.
            domain: Unused; kept for interface compatibility.

        Returns:
            {"data": {...}} with provider_id, external_id, name, description,
            external_sell_price, images, and variants when present.

        Raises:
            HTTPException: 400 for an invalid URL or unprocessable response.
        """
        asin = self._extract_asin(url)

        try:
            data = await get_product_details(asin)
            product_data = self._get_product_data(data)

            result = {
                "name": self._get_name(product_data),
                "description": self._get_description(product_data),
                "external_sell_price": self._get_price(product_data),
                "images": self._get_images(product_data)
            }

            variants = self._extract_variants(product_data)
            if variants:
                result["variants"] = variants

            response = {
                "provider_id": "amazon",
                "external_id": asin,
                **result
            }

            return {"data": response}

        except Exception as e:
            # Boundary handler: any API/parsing problem becomes a 400.
            raise HTTPException(
                status_code=400,
                detail=f"Error processing product data: {str(e)}"
            )

    def _get_product_data(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """Return the payload under "data", or raise if missing/empty."""
        product_data = response.get("data", {})
        if not product_data:
            raise ValueError("No product data found in response")
        return product_data

    def _get_name(self, product_data: Dict[str, Any]) -> str:
        """Product title ("product_title", falling back to "title")."""
        return product_data.get("product_title", product_data.get("title", ""))

    def _get_description(self, product_data: Dict[str, Any]) -> str:
        """Description text, falling back to the joined "about_product" bullets."""
        description = product_data.get("product_description", "")

        if not description:
            about_product = product_data.get("about_product", [])
            if about_product:
                description = "\n".join(about_product)

        return description

    def _get_price(self, product_data: Dict[str, Any]) -> Optional[Decimal]:
        """Parsed price from "product_price", falling back to
        pricing.current_price; None when absent."""
        price_str = product_data.get("product_price", "")
        if not price_str:
            price_info = product_data.get("pricing", {})
            price_str = price_info.get("current_price", "")

        if not price_str:
            return None

        return parse_price(price_str)

    def _get_images(self, product_data: Dict[str, Any]) -> List[str]:
        """Image URLs: "product_photos" wholesale if present, otherwise the
        main photo plus any additional "images"."""
        images = []

        product_photos = product_data.get("product_photos", [])
        if product_photos:
            return product_photos

        main_image = product_data.get("product_photo", product_data.get("main_image", ""))
        if main_image:
            images.append(main_image)

        additional_images = product_data.get("images", [])
        if additional_images:
            images.extend(additional_images)

        return images

    def _extract_asin(self, url: str) -> str:
        """Extract the 10-character ASIN from the usual Amazon URL shapes."""
        patterns = [
            r'/dp/([A-Z0-9]{10})',
            r'/gp/product/([A-Z0-9]{10})',
            r'/ASIN/([A-Z0-9]{10})',
            r'asin=([A-Z0-9]{10})',
            r'asin%3D([A-Z0-9]{10})'
        ]

        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)

        raise HTTPException(
            status_code=400,
            detail="Product not found - Invalid Amazon URL"
        )

    def _extract_variants(self, product_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Build normalized variant entries from the variation dimensions;
        empty list when any of the three variation structures is missing."""
        dimensions = product_data.get("product_variations_dimensions", [])
        variations = product_data.get("product_variations", {})
        all_variations = product_data.get("all_product_variations", {})

        if not dimensions or not variations or not all_variations:
            return []

        variants = []
        product_title = product_data.get("product_title", "")

        for asin, variant_data in all_variations.items():
            variant_attributes = self._get_variant_attributes(dimensions, variant_data)
            variant_key = "-".join([attr["value"] for attr in variant_attributes])

            variant_info = {
                "provider_id": "amazon",
                "external_id": asin,
                "name": product_title,
                "images": self._get_variant_images(dimensions, variations, variant_data, product_data),
                "variant_key": variant_key,
                "attributes": variant_attributes
            }

            variants.append(variant_info)

        return variants

    def _get_variant_attributes(self, dimensions: List[str], variant_data: Dict[str, str]) -> List[Dict[str, str]]:
        """Map each variation dimension present on the variant to a
        {category_name, value} attribute entry."""
        attributes = []

        for dim in dimensions:
            if dim in variant_data:
                attributes.append({
                    "category_name": dim.capitalize(),
                    "value": variant_data[dim]
                })

        return attributes

    def _get_variant_images(self, dimensions: List[str], variations: Dict[str, List],
                            variant_data: Dict[str, str], product_data: Dict[str, Any]) -> List[str]:
        """Per-dimension variant photos, falling back to the main product photo."""
        images = []
        for dim in dimensions:
            if dim in variations and dim in variant_data:
                for var in variations[dim]:
                    if var.get("value") == variant_data.get(dim) and "photo" in var:
                        images.append(var["photo"])
                        break

        if not images:
            main_image = product_data.get("product_photo")
            if main_image:
                images.append(main_image)

        return images
diff --git a/app/scrapers/cj_scraper.py b/app/scrapers/cj_scraper.py
new file mode 100644
index 0000000..172eae5
--- /dev/null
+++ b/app/scrapers/cj_scraper.py
@@ -0,0 +1,42 @@
+from typing import Dict, Any
+
+import httpx
+from app.scrapers.scraper_interface import ScraperInterface
+from fastapi import HTTPException
+
+
class CJScraper(ScraperInterface):
    """Scraper that delegates CJ Dropshipping product lookups to an n8n webhook."""

    def __init__(self):
        # External workflow endpoint that performs the actual CJ search.
        self.webhook_url = "https://n8n.fluxi.co/webhook/cj-search"

    async def scrape_direct(self, html: str) -> Dict[str, Any]:
        """Direct-HTML scraping is not supported for CJ; returns {}."""
        return {}

    async def scrape(self, url: str, domain: str = None) -> dict:
        """POST the product URL to the webhook and return its JSON payload.

        Raises:
            HTTPException: with the upstream status code on non-200 replies,
                or 500 on transport errors.
        """
        try:
            async with httpx.AsyncClient(timeout=20.0) as client:
                response = await client.post(
                    self.webhook_url,
                    headers={"Content-Type": "application/json"},
                    json={"url_cj": url},
                )

                if response.status_code == 200:
                    return response.json()

                raise HTTPException(
                    status_code=response.status_code,
                    detail=f"Failed to get data from CJ Dropshipping: {response.status_code}",
                )

        except HTTPException:
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Request error to CJ Dropshipping: {str(e)}")
diff --git a/app/scrapers/dropi_scraper.py b/app/scrapers/dropi_scraper.py
new file mode 100644
index 0000000..29b8477
--- /dev/null
+++ b/app/scrapers/dropi_scraper.py
@@ -0,0 +1,252 @@
+import re
+from decimal import Decimal
+from typing import Dict, Any, List, Optional
+
+from fastapi import HTTPException
+
+from app.externals.dropi.dropi_client import get_product_details
+from app.scrapers.helper_price import parse_price
+from app.scrapers.scraper_interface import ScraperInterface
+from app.configurations.config import DROPI_S3_BASE_URL
+
+
+class DropiScraper(ScraperInterface):
+ def __init__(self, country: str = "co"):
+ self.country = country
+
+ async def scrape_direct(self, html: str) -> Dict[str, Any]:
+ return {}
+
+ async def scrape(self, url: str, domain: str = None) -> Dict[str, Any]:
+ product_id = self._extract_product_id(url)
+
+ try:
+ data = await get_product_details(product_id, self.country)
+ product_data = self._get_product_data(data)
+
+ result = {
+ "name": self._get_name(product_data),
+ "description": self._get_description(product_data),
+ "external_sell_price": self._get_price(product_data),
+ "images": self._get_images(product_data),
+ }
+
+ variants = self._extract_variants(product_data)
+ if variants:
+ result["variants"] = variants
+
+ response = {
+ "provider_id": "dropi",
+ "external_id": product_id,
+ **result
+ }
+
+ return {"data": response}
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Error processing product data from Dropi: {str(e)}"
+ )
+
+ def _get_product_data(self, response: Dict[str, Any]) -> Dict[str, Any]:
+ if not response.get("isSuccess"):
+ raise ValueError("Dropi API returned an error.")
+
+ product_data = response.get("objects")
+ if not product_data or not isinstance(product_data, dict):
+ raise ValueError("No product data found in Dropi response")
+ return product_data
+
+ def _get_name(self, product_data: Dict[str, Any]) -> str:
+ return product_data.get("name", "")
+
+ def _get_description(self, product_data: Dict[str, Any]) -> str:
+ html_description = product_data.get("description", "")
+ if not html_description:
+ return ""
+
+ # Remove HTML tags for a cleaner description
+ clean_text = re.sub(r'<[^>]+>', ' ', html_description)
+ # Replace
with newlines and clean up whitespace
+ clean_text = clean_text.replace('
', '\n').strip()
+ clean_text = re.sub(r'\s+', ' ', clean_text).strip()
+ return clean_text
+
+ def _get_price(self, product_data: Dict[str, Any]) -> Optional[Decimal]:
+ price_str = product_data.get("sale_price")
+ if not price_str:
+ return None
+ return parse_price(price_str)
+
+ def _get_images(self, product_data: Dict[str, Any]) -> List[str]:
+ photos = product_data.get("photos", [])
+ if not photos:
+ return []
+
+ images = []
+ for item in photos:
+ if item.get("urlS3"):
+ images.append(DROPI_S3_BASE_URL + item["urlS3"])
+ return images
+
+ def _extract_variants(self, product_data: Dict[str, Any]) -> List[Dict[str, Any]]:
+ variations = product_data.get("variations", [])
+ if not variations:
+ return []
+
+ product_name = product_data.get("name", "")
+ product_photos = product_data.get("photos", [])
+
+ variants = []
+ for variation in variations:
+ variant = self._build_variant(variation, product_name, product_photos)
+ if variant:
+ variants.append(variant)
+
+ return variants
+
+ def _build_variant(self, variation: Dict[str, Any], product_name: str, product_photos: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+ """Construye un objeto de variante en el formato estándar"""
+
+ # Extraer atributos
+ attributes = self._extract_attributes(variation)
+
+ # Construir nombre de la variante
+ variant_name = self._build_variant_name(product_name, attributes)
+
+ # Construir clave de variante
+ variant_key = self._build_variant_key(attributes)
+
+ # Obtener precios
+ sale_price = self._parse_variant_price(variation.get("sale_price"))
+ suggested_price = self._parse_variant_price(variation.get("suggested_price"))
+
+ # Determinar disponibilidad basada en stock
+ available = self._check_availability(variation)
+
+ # Obtener imágenes de la variante
+ images = self._get_variant_images(variation, product_photos)
+
+ return {
+ "name": variant_name,
+ "variant_key": variant_key,
+ "price": float(sale_price) if sale_price else None,
+ "available": available,
+ "images": images,
+ "attributes": attributes,
+ "provider_id": "dropi",
+ "external_id": str(variation.get("id", "")),
+ "external_sell_price": float(sale_price) if sale_price else None,
+ "external_suggested_sell_price": float(suggested_price) if suggested_price else None
+ }
+
+ def _extract_attributes(self, variation: Dict[str, Any]) -> List[Dict[str, str]]:
+ """Extrae los atributos de una variación"""
+ attributes = []
+ attribute_values = variation.get("attribute_values", [])
+
+ for attr_value in attribute_values:
+ attribute_info = attr_value.get("attribute", {})
+ attribute_name = attribute_info.get("description", "")
+ value = attr_value.get("value", "")
+
+ # El valor puede venir en formato "COLOR-TALLA VALOR" o similar
+ # Intentamos limpiar y separar si es necesario
+ if attribute_name and value:
+ # Si el valor contiene el nombre del atributo, lo limpiamos
+ clean_value = self._clean_attribute_value(value, attribute_name)
+
+ attributes.append({
+ "name": attribute_name.title(),
+ "value": clean_value
+ })
+
+ return attributes
+
+ def _clean_attribute_value(self, value: str, attribute_name: str) -> str:
+ """Limpia el valor del atributo removiendo prefijos redundantes"""
+ # Ejemplo: "NEGRO-TALLA L" cuando el atributo es "TALLA" -> "NEGRO-L"
+ # O mejor aún, intentar separar los componentes
+ parts = value.split("-")
+
+ # Si hay múltiples partes, intentamos encontrar la relevante
+ if len(parts) > 1:
+ # Buscar la parte que no sea el nombre del atributo
+ cleaned_parts = []
+ for part in parts:
+ # Remover el nombre del atributo si aparece en la parte
+ part_clean = part.replace(attribute_name.upper(), "").strip()
+ if part_clean:
+ cleaned_parts.append(part_clean)
+
+ return " ".join(cleaned_parts).strip() if cleaned_parts else value
+
+ return value
+
+ def _build_variant_name(self, product_name: str, attributes: List[Dict[str, str]]) -> str:
+ """Construye el nombre de la variante combinando el nombre del producto y los atributos"""
+ if not attributes:
+ return product_name
+
+ # Concatenar los valores de atributos
+ attribute_parts = [attr["value"] for attr in attributes]
+ attribute_string = " - ".join(attribute_parts)
+
+ return f"{product_name} - {attribute_string}"
+
+ def _build_variant_key(self, attributes: List[Dict[str, str]]) -> str:
+ """Construye una clave única para la variante basada en los atributos"""
+ if not attributes:
+ return "default"
+
+ # Crear clave en formato "attribute1-value1-attribute2-value2"
+ key_parts = []
+ for attr in attributes:
+ attr_name = attr["name"].lower().replace(" ", "-")
+ attr_value = attr["value"].lower().replace(" ", "-")
+ key_parts.append(f"{attr_name}-{attr_value}")
+
+ return "-".join(key_parts)
+
+ def _parse_variant_price(self, price_str: Any) -> Optional[Decimal]:
+ """Parsea el precio de una variante"""
+ if not price_str:
+ return None
+ return parse_price(str(price_str))
+
+ def _check_availability(self, variation: Dict[str, Any]) -> bool:
+ """Verifica si la variante está disponible basándose en el stock"""
+ warehouse_variations = variation.get("warehouse_product_variation", [])
+
+ if not warehouse_variations:
+ return False
+
+ # Verificar si hay stock disponible en algún almacén
+ total_stock = sum(wh.get("stock", 0) for wh in warehouse_variations)
+ return total_stock > 0
+
+ def _get_variant_images(self, variation: Dict[str, Any], product_photos: List[Dict[str, Any]]) -> List[str]:
+ variation_id = variation.get("id")
+ images = []
+
+ for photo in product_photos:
+ if photo.get("variation_id") == variation_id and photo.get("urlS3"):
+ images.append(DROPI_S3_BASE_URL + photo["urlS3"])
+
+ if not images:
+ for photo in product_photos:
+ if not photo.get("variation_id") and photo.get("urlS3"):
+ images.append(DROPI_S3_BASE_URL + photo["urlS3"])
+
+ return images
+
+ def _extract_product_id(self, url: str) -> str:
+ match = re.search(r'/product-details/(\d+)', url)
+ if match:
+ return match.group(1)
+
+ raise HTTPException(
+ status_code=400,
+ detail="Product ID not found in Dropi URL"
+ )
\ No newline at end of file
diff --git a/app/scrapers/helper_price.py b/app/scrapers/helper_price.py
new file mode 100644
index 0000000..e728944
--- /dev/null
+++ b/app/scrapers/helper_price.py
@@ -0,0 +1,19 @@
+from decimal import Decimal, InvalidOperation
+from typing import Optional, Any
+import re
+
+
+def parse_price(price_str: Any) -> Optional[Decimal]:
+ if isinstance(price_str, (int, float)):
+ return Decimal(str(price_str))
+
+ if isinstance(price_str, str):
+ match = re.search(r"[\d,.]+", price_str)
+ if match:
+ num_str = match.group(0).replace(",", "")
+ try:
+ return Decimal(num_str)
+ except:
+ pass
+
+ return None
diff --git a/app/scrapers/ia_scraper.py b/app/scrapers/ia_scraper.py
new file mode 100644
index 0000000..233f5fa
--- /dev/null
+++ b/app/scrapers/ia_scraper.py
@@ -0,0 +1,91 @@
+from app.configurations.config import SCRAPER_AGENT, SCRAPER_AGENT_DIRECT
+from app.helpers.escape_helper import clean_html_less_deeply, clean_html_deeply
+from app.pdf.helpers import clean_text, clean_json
+from app.requests.message_request import MessageRequest
+from app.scrapers.helper_price import parse_price
+from app.scrapers.scraper_interface import ScraperInterface
+from typing import Dict, Any
+from app.externals.scraperapi.scraperapi_client import ScraperAPIClient
+from app.services.message_service_interface import MessageServiceInterface
+import json
+import os
+from datetime import datetime
+
+
+class IAScraper(ScraperInterface):
+    """Scrapes product pages by feeding simplified HTML to an AI agent."""
+
+    async def scrape_direct(self, html: str) -> Dict[str, Any]:
+        """Extract product data from raw HTML via the direct scraper agent."""
+        simplified_html_clean = clean_html_deeply(html)
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename = f"simplified_html_{timestamp}.html"
+
+        # NOTE(review): debug dump of the simplified HTML to local disk —
+        # confirm this is intentional outside of development.
+        os.makedirs("scraped_html", exist_ok=True)
+
+        filepath = os.path.join("scraped_html", filename)
+        with open(filepath, 'w', encoding='utf-8') as f:
+            f.write(simplified_html_clean)
+
+        print(f"HTML simplificado guardado en: {filepath}")
+
+        message_request = MessageRequest(
+            query=f"Product content: {simplified_html_clean} ",
+            agent_id=SCRAPER_AGENT_DIRECT,
+            conversation_id="",
+            json_parser={"code": "string"})
+
+        # Reference payload shape kept for documentation purposes:
+        """ json_parser={
+        "products": [
+            {
+                "id": "string",
+                "title": "string",
+                "description": "string",
+                "price": 0,
+                "images": ["string"],
+                "product_url": "string",
+                "variants": [
+                    {
+                        "title": "string",
+                        "price": 0
+                    }
+                ]
+            }
+        ]
+        """
+
+        result = await self.message_service.handle_message_json(message_request)
+
+        return result
+
+    def __init__(self, message_service: MessageServiceInterface):
+        # Used to talk to the scraper agents
+        self.message_service = message_service
+
+    async def scrape(self, url: str, domain: str = None) -> Dict[str, Any]:
+        """Fetch the page HTML, simplify it, and let the scraper agent return structured data."""
+        client = ScraperAPIClient()
+        # Alibaba pages go through the direct ScraperAPI path; everything else via the lambda proxy
+        if domain and "alibaba" in domain:
+            html_content = await client.get_html(url)
+        else:
+            html_content = await client.get_html_lambda(url)
+        simplified_html_clean = clean_html_deeply(html_content)
+
+        message_request = MessageRequest(
+            query=f"provider_id={domain} . product_url={url} Product content: {simplified_html_clean} ",
+            agent_id=SCRAPER_AGENT,
+            conversation_id="",
+        )
+
+        result = await self.message_service.handle_message(message_request)
+        # The agent's reply is cleaned and parsed as JSON
+        data_clean = clean_text(clean_json(result['text']))
+        data = json.loads(data_clean)
+        data['data']['external_sell_price'] = parse_price(data['data']['external_sell_price'])
+        # Normalize protocol-relative image URLs ("//host/...") to https
+        images = data['data'].get('images', [])
+        cleaned_images = [
+            f"https:{img}" if img.startswith("//") else img for img in images
+        ]
+        data['data']['images'] = cleaned_images
+
+        # Drop placeholder variants the agent could not identify
+        if 'variants' in data['data']:
+            data['data']['variants'] = [
+                variant for variant in data['data']['variants']
+                if variant.get('variant_key') != 'unknown'
+            ]
+
+        return data
diff --git a/app/scrapers/scraper_interface.py b/app/scrapers/scraper_interface.py
new file mode 100644
index 0000000..c6349fa
--- /dev/null
+++ b/app/scrapers/scraper_interface.py
@@ -0,0 +1,16 @@
+from abc import ABC, abstractmethod
+from typing import Dict, Any
+
+
+class ScraperInterface(ABC):
+    """Common contract for all product scrapers."""
+
+    @abstractmethod
+    async def scrape(self, url: str, domain: str = None) -> Dict[str, Any]:
+        """Fetch and normalize product data for *url*; *domain* is an optional hint."""
+        pass
+
+    @abstractmethod
+    async def scrape_direct(self, html: str) -> Dict[str, Any]:
+        """
+        Optional method to scrape directly from HTML content.
+        This can be overridden by subclasses if needed.
+        """
+        raise NotImplementedError("This method is not implemented.")
\ No newline at end of file
diff --git a/app/services/audio_service.py b/app/services/audio_service.py
new file mode 100644
index 0000000..c8817d2
--- /dev/null
+++ b/app/services/audio_service.py
@@ -0,0 +1,25 @@
+from typing import Any, Dict
+
+from fastapi import Depends, HTTPException
+
+from app.requests.generate_audio_request import GenerateAudioRequest
+from app.services.audio_service_interface import AudioServiceInterface
+from app.externals.fal.fal_client import FalClient
+
+
+class AudioService(AudioServiceInterface):
+ def __init__(self, fal_client: FalClient = Depends()):
+ self.fal_client = fal_client
+
+ async def generate_audio(self, request: GenerateAudioRequest) -> Dict[str, Any]:
+ if not request.text:
+ raise HTTPException(status_code=400, detail="Falta 'text'")
+
+ content = request.content or {}
+ fal_webhook = content.get("fal_webhook")
+ extra = {k: v for k, v in content.items() if k not in {"fal_webhook"}}
+
+ try:
+ return await self.fal_client.tts_multilingual_v2(text=request.text, fal_webhook=fal_webhook, **extra)
+ except Exception as e:
+ raise HTTPException(status_code=502, detail=f"Error al llamar a FAL: {str(e)}")
\ No newline at end of file
diff --git a/app/services/audio_service_interface.py b/app/services/audio_service_interface.py
new file mode 100644
index 0000000..250753c
--- /dev/null
+++ b/app/services/audio_service_interface.py
@@ -0,0 +1,9 @@
+from abc import ABC, abstractmethod
+
+from app.requests.generate_audio_request import GenerateAudioRequest
+
+
+class AudioServiceInterface(ABC):
+    """Contract for text-to-speech generation services."""
+
+    @abstractmethod
+    async def generate_audio(self, request: GenerateAudioRequest):
+        """Generate audio for request.text and return the provider response."""
+        pass
\ No newline at end of file
diff --git a/app/services/dropi_service.py b/app/services/dropi_service.py
new file mode 100644
index 0000000..61ef333
--- /dev/null
+++ b/app/services/dropi_service.py
@@ -0,0 +1,25 @@
+from typing import List, Dict, Any
+from fastapi import Depends, HTTPException
+
+from app.externals.dropi import dropi_client
+from app.services.dropi_service_interface import DropiServiceInterface
+
+
+class DropiService(DropiServiceInterface):
+ def __init__(self):
+ pass
+
+ async def get_departments(self, country: str = "co") -> List[Dict[str, Any]]:
+ try:
+ response = await dropi_client.get_departments(country)
+ return response.get("objects", [])
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Error fetching departments from Dropi: {str(e)}")
+
+ async def get_cities_by_department(self, department_id: int, country: str = "co") -> List[Dict[str, Any]]:
+ try:
+ rate_type = "CON RECAUDO"
+ response = await dropi_client.get_cities_by_department(department_id, rate_type, country)
+ return response.get("objects", {}).get("cities", [])
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Error fetching cities from Dropi: {str(e)}")
\ No newline at end of file
diff --git a/app/services/dropi_service_interface.py b/app/services/dropi_service_interface.py
new file mode 100644
index 0000000..3de8899
--- /dev/null
+++ b/app/services/dropi_service_interface.py
@@ -0,0 +1,12 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Any
+
+
+class DropiServiceInterface(ABC):
+    """Contract for Dropi geography lookup services."""
+
+    @abstractmethod
+    async def get_departments(self, country: str = "co") -> List[Dict[str, Any]]:
+        """Return the departments available for *country*."""
+        pass
+
+    @abstractmethod
+    async def get_cities_by_department(self, department_id: int, country: str = "co") -> List[Dict[str, Any]]:
+        """Return the cities belonging to *department_id*."""
+        pass
\ No newline at end of file
diff --git a/app/services/image_service.py b/app/services/image_service.py
new file mode 100644
index 0000000..6555f66
--- /dev/null
+++ b/app/services/image_service.py
@@ -0,0 +1,165 @@
+from app.configurations.config import (
+ AGENT_IMAGE_VARIATIONS,
+)
+from app.externals.agent_config.requests.agent_config_request import AgentConfigRequest
+from app.externals.s3_upload.responses.s3_upload_response import S3UploadResponse
+from app.requests.generate_image_request import GenerateImageRequest
+from app.requests.message_request import MessageRequest
+from app.requests.variation_image_request import VariationImageRequest
+from app.externals.s3_upload.requests.s3_upload_request import S3UploadRequest
+from app.responses.generate_image_response import GenerateImageResponse
+from app.services.image_service_interface import ImageServiceInterface
+from app.services.message_service_interface import MessageServiceInterface
+from app.externals.s3_upload.s3_upload_client import upload_file
+from app.helpers.image_compression_helper import compress_image_to_target
+from fastapi import Depends
+import asyncio
+import uuid
+from dotenv import load_dotenv
+from app.externals.google_vision.google_vision_client import analyze_image
+from app.externals.images.image_client import google_image, openai_image_edit
+from typing import Optional
+import base64
+
+load_dotenv()
+
+
+class ImageService(ImageServiceInterface):
+ def __init__(self, message_service: MessageServiceInterface = Depends()):
+ self.message_service = message_service
+
+ async def _upload_to_s3(self, image_base64: str, owner_id: str, folder_id: str,
+ prefix_name: str) -> S3UploadResponse:
+ unique_id = uuid.uuid4().hex[:8]
+ file_name = f"{prefix_name}_{unique_id}"
+ original_image_bytes = base64.b64decode(image_base64)
+ image_base64_compressed = compress_image_to_target(original_image_bytes, target_kb=120)
+
+ return await upload_file(
+ S3UploadRequest(
+ file=image_base64_compressed,
+ folder=f"{owner_id}/products/variations/{folder_id}",
+ filename=file_name
+ )
+ )
+
+
+ async def _generate_single_variation(self, url_images: list[str], prompt: str, owner_id: str,
+ folder_id: str, file: Optional[str] = None, extra_params: Optional[dict] = None,
+ provider: Optional[str] = None, model_ai: Optional[str] = None) -> str:
+
+ if provider and provider.lower() == "openai":
+ image_content = await openai_image_edit(image_urls=url_images, prompt=prompt, model_ia=model_ai, extra_params=extra_params)
+ else:
+ image_content = await google_image(image_urls=url_images, prompt=prompt, model_ia=model_ai, extra_params=extra_params)
+
+ content_base64 = base64.b64encode(image_content).decode('utf-8')
+ final_upload = await self._upload_to_s3(
+ content_base64,
+ owner_id,
+ folder_id,
+ "variation"
+ )
+ return final_upload.s3_url
+
+ async def generate_variation_images(self, request: VariationImageRequest, owner_id: str):
+ folder_id = uuid.uuid4().hex[:8]
+ original_image_response = await self._upload_to_s3(request.file, owner_id, folder_id, "original")
+ vision_analysis = await analyze_image(request.file)
+
+ message_request = MessageRequest(
+ query=f"Attached is the product image. {vision_analysis.get_analysis_text()}",
+ agent_id=AGENT_IMAGE_VARIATIONS,
+ conversation_id="",
+ parameter_prompt={"language": request.language},
+ files=[{
+ "type": "image",
+ "url": original_image_response.s3_url,
+ "content": request.file
+ }]
+ )
+
+ response_data = await self.message_service.handle_message_with_config(message_request)
+ agent_config = response_data["agent_config"]
+ response = response_data["message"]
+
+ extra_params = None
+ if agent_config.preferences.extra_parameters:
+ extra_params = agent_config.preferences.extra_parameters
+
+ prompt = response["text"] + " Do not modify any text, letters, brand logos, brand names, or symbols."
+ tasks = [
+ self._generate_single_variation([original_image_response.s3_url], prompt, owner_id, folder_id,
+ request.file, extra_params, provider=agent_config.provider_ai,
+ model_ai=agent_config.model_ai)
+ for i in range(request.num_variations)
+ ]
+ generated_urls = await asyncio.gather(*tasks)
+
+ return GenerateImageResponse(
+ generated_urls=generated_urls,
+ original_url=original_image_response.s3_url,
+ original_urls=[original_image_response.s3_url],
+ generated_prompt=prompt,
+ vision_analysis=vision_analysis
+ )
+
+ async def generate_images_from(self, request: GenerateImageRequest, owner_id: str):
+ folder_id = uuid.uuid4().hex[:8]
+ urls = request.file_urls or []
+ original_url = request.file_url
+
+ if request.file:
+ original_image_response = await self._upload_to_s3(request.file, owner_id, folder_id, "original")
+ original_url = original_image_response.s3_url
+
+ if len(urls) == 0 and original_url:
+ urls.append(request.file_url)
+
+ tasks = [
+ self._generate_single_variation(
+ urls,
+ request.prompt,
+ owner_id,
+ folder_id,
+ request.file,
+ extra_params=request.extra_parameters,
+ provider=request.provider,
+ model_ai=request.model_ai
+ )
+ for i in range(request.num_variations)
+ ]
+ generated_urls = await asyncio.gather(*tasks)
+
+ return GenerateImageResponse(
+ original_urls=urls,
+ generated_urls=generated_urls,
+ original_url=original_url,
+ generated_prompt=request.prompt
+ )
+
+ async def generate_images_from_agent(self, request: GenerateImageRequest, owner_id: str):
+ parameter_prompt = request.parameter_prompt or {}
+ parameter_prompt["language"] = request.language
+
+ data = MessageRequest(
+ agent_id=request.agent_id,
+ query=request.agent_id,
+ parameter_prompt=parameter_prompt,
+ conversation_id="",
+ )
+
+ response_data = await self.message_service.handle_message_with_config(data)
+ agent_config = response_data["agent_config"]
+ message = response_data["message"]
+
+ request.prompt = message["text"]
+ request.provider = agent_config.provider_ai
+ request.model_ai = agent_config.model_ai
+
+ if agent_config.preferences.extra_parameters:
+ request.extra_parameters = agent_config.preferences.extra_parameters
+
+ response = await self.generate_images_from(request, owner_id)
+
+ return response
diff --git a/app/services/image_service_interface.py b/app/services/image_service_interface.py
new file mode 100644
index 0000000..40d9816
--- /dev/null
+++ b/app/services/image_service_interface.py
@@ -0,0 +1,17 @@
+from abc import abstractmethod, ABC
+
+from app.requests.generate_image_request import GenerateImageRequest
+from app.requests.variation_image_request import VariationImageRequest
+
+
+class ImageServiceInterface(ABC):
+ @abstractmethod
+ async def generate_variation_images(self, request: VariationImageRequest, owner_id: str):
+ pass
+
+ @abstractmethod
+ async def generate_images_from(self, request: GenerateImageRequest, owner_id: str):
+ pass
+
+ async def generate_images_from_agent(self, generate_image_request, owner_id):
+ pass
diff --git a/app/services/message_service.py b/app/services/message_service.py
index 432da36..1d7b088 100644
--- a/app/services/message_service.py
+++ b/app/services/message_service.py
@@ -1,8 +1,16 @@
import json
+import asyncio
+import hashlib
-from app.configurations.config import AGENT_RECOMMEND_PRODUCTS_ID
+from app.configurations.config import AGENT_RECOMMEND_PRODUCTS_ID, AGENT_RECOMMEND_SIMILAR_PRODUCTS_ID, ENVIRONMENT
+from app.configurations.copies_config import AGENT_COPIES
from app.externals.agent_config.agent_config_client import get_agent
-from app.externals.aliexpress.requests.aliexpress_search_request import AliexpressSearchRequest
+from app.externals.s3_upload.requests.s3_upload_request import S3UploadRequest
+from app.externals.s3_upload.s3_upload_client import upload_file, check_file_exists_direct
+from app.pdf.helpers import clean_text, clean_json
+from app.requests.brand_context_resolver_request import BrandContextResolverRequest
+from app.requests.copy_request import CopyRequest
+from app.requests.generate_pdf_request import GeneratePdfRequest
from app.requests.message_request import MessageRequest
from app.externals.agent_config.requests.agent_config_request import AgentConfigRequest
from app.requests.recommend_product_request import RecommendProductRequest
@@ -10,7 +18,11 @@
from app.services.message_service_interface import MessageServiceInterface
from app.managers.conversation_manager_interface import ConversationManagerInterface
from fastapi import Depends
-from app.externals.aliexpress.aliexpress_client import search_products
+from app.configurations.pdf_manual_config import PDF_MANUAL_SECTIONS, get_sections_for_language
+from app.pdf.pdf_manual_generator import PDFManualGenerator
+from app.externals.amazon.requests.amazon_search_request import AmazonSearchRequest
+from app.externals.amazon.amazon_client import search_products
+from app.requests.resolve_funnel_request import ResolveFunnelRequest
class MessageService(MessageServiceInterface):
@@ -32,14 +44,197 @@ async def handle_message(self, request: MessageRequest):
agent_config=agent_config
)
+    async def handle_message_with_config(self, request: MessageRequest):
+        """Run one conversation turn and return both the reply and the resolved agent config."""
+        data = AgentConfigRequest(
+            agent_id=request.agent_id,
+            query=request.query,
+            metadata_filter=request.metadata_filter,
+            parameter_prompt=request.parameter_prompt
+        )
+
+        agent_config = await get_agent(data)
+
+        message_response = await self.conversation_manager.process_conversation(
+            request=request,
+            agent_config=agent_config
+        )
+        return {
+            "message": message_response,
+            "agent_config": agent_config
+        }
+
+    async def handle_message_json(self, request: MessageRequest):
+        """Handle a message and parse the agent's text reply as JSON."""
+        response = await self.handle_message(request)
+
+        return json.loads(response['text'])
+
    async def recommend_products(self, request: RecommendProductRequest):
+        # Use the "similar products" agent when the caller asks for similar items
+        agent_id = AGENT_RECOMMEND_SIMILAR_PRODUCTS_ID if request.similar else AGENT_RECOMMEND_PRODUCTS_ID
+
        data = await self.handle_message(MessageRequest(
-            agent_id=AGENT_RECOMMEND_PRODUCTS_ID,
+            agent_id=agent_id,
            conversation_id="",
            query=f"Product Name: {request.product_name} Description: {request.product_description}",
        ))
        json_data = json.loads(data['text'])
-        aliexpress_data = await search_products(AliexpressSearchRequest(q=json_data['recommended_product']))
+        # Amazon replaced AliExpress as the product-search backend
+        amazon_data = await search_products(AmazonSearchRequest(query=json_data['recommended_product']))
+
+        return RecommendProductResponse(ai_response=json_data, products=amazon_data.get_products())
+
+    async def process_multiple_agents(self, agent_queries: list[dict]) -> dict:
+        """Fan out one message per {'agent', 'query'} item and merge the JSON replies.
+
+        Agents that fail or return unparseable JSON are tolerated; raises
+        ValueError when no agent produced a usable response.
+        """
+        tasks = [
+            self.handle_message(MessageRequest(
+                agent_id=item['agent'],
+                conversation_id="",
+                query=item['query']
+            )) for item in agent_queries
+        ]
+
+        try:
+            responses = await asyncio.gather(*tasks, return_exceptions=True)
+
+            combined_data = {}
+            for response in responses:
+                if isinstance(response, Exception):
+                    # Skip failed agents; merge whatever succeeded
+                    continue
+                data_clean = clean_text(clean_json(response['text']))
+                data = json.loads(data_clean)
+                combined_data.update(data)
+
+            if not combined_data:
+                raise ValueError("No se pudo obtener respuesta válida de ningún agente")
+
+            return combined_data
+
+        except Exception as e:
+            # NOTE(review): this also re-wraps the ValueError raised just above
+            raise ValueError(f"Error procesando respuestas de agentes: {str(e)}")
+
+    async def generate_copies(self, request: CopyRequest):
+        """Generate marketing copies by sending the same prompt to every configured copy agent."""
+        agent_queries = [
+            {'agent': agent, 'query': request.prompt}
+            for agent in AGENT_COPIES
+        ]
+
+        combined_data = await self.process_multiple_agents(agent_queries)
+
+        return {"copies": combined_data}
+
+    async def generate_pdf(self, request: GeneratePdfRequest):
+        """Build (or return a cached) product manual PDF and upload it to S3."""
+        base_query = f"Product Name: {request.product_name} Description: {request.product_description}. Language: {request.language}. Content: {request.content}"
+
+        # md5 is used only as a cheap cache key here, not for security
+        content_hash = hashlib.md5(f"{request.title}_{request.image_url}".encode()).hexdigest()[:8]
+        base_filename = f"{request.product_id}_{request.language}_{content_hash}"
+
+        version = "v2"
+        base_url = f"https://fluxi.co/{ENVIRONMENT}/assets"
+        folder_path = f"{request.owner_id}/pdfs/{version}"
+        s3_url = f"{base_url}/{folder_path}/{base_filename}.pdf"
+
+        # Short-circuit when an identical PDF was already generated
+        exists = await check_file_exists_direct(s3_url)
+        if exists:
+            return {"s3_url": s3_url}
+
+        sections = get_sections_for_language(request.language)
+
+        # One agent query per manual section
+        agent_queries = [
+            {'agent': "agent_copies_pdf", 'query': f"section: {section}. {base_query} "}
+            for section in sections.keys()
+        ]
+
+        combined_data = await self.process_multiple_agents(agent_queries)
+
+        pdf_generator = PDFManualGenerator(request.product_name, language=request.language)
+        pdf = await pdf_generator.create_manual(combined_data, request.title, request.image_url)
+
+        result = await upload_file(
+            S3UploadRequest(
+                file=pdf,
+                folder=folder_path,
+                filename=base_filename
+            )
+        )
+
+        return result
+
+    async def resolve_funnel(self, request: ResolveFunnelRequest):
+        """Run the pain-detection → buyer-detection → sales-angles agent pipeline.
+
+        Each stage feeds its text output into the next stage's prompt parameters.
+        """
+        pain_detection_response = await self.handle_message(MessageRequest(
+            agent_id="pain_detection",
+            conversation_id="",
+            query="pain_detection",
+            parameter_prompt={
+                "product_name": request.product_name,
+                "product_description": request.product_description,
+                "language": request.language
+            }
+        ))
+
+        pain_detection_message = pain_detection_response['text']
+
+        buyer_detection_response = await self.handle_message(MessageRequest(
+            agent_id="buyer_detection",
+            conversation_id="",
+            query="buyer_detection",
+            parameter_prompt={
+                "product_name": request.product_name,
+                "product_description": request.product_description,
+                "pain_detection": pain_detection_message,
+                "language": request.language
+            }
+        ))
+
+        buyer_detection_message = buyer_detection_response['text']
+
+        # Final stage returns structured JSON (list of sales angles)
+        sales_angles_response = await self.handle_message_json(MessageRequest(
+            agent_id="sales_angles_v2",
+            conversation_id="",
+            query="sales_angles_v2",
+            json_parser={
+                "angles": [
+                    {
+                        "name": "string",
+                        "description": "string"
+                    }
+                ]
+            },
+            parameter_prompt={
+                "product_name": request.product_name,
+                "product_description": request.product_description,
+                "pain_detection": pain_detection_message,
+                "buyer_detection": buyer_detection_message,
+                "language": request.language
+            }
+        ))
+
+        return {
+            "pain_detection": pain_detection_message,
+            "buyer_detection": buyer_detection_message,
+            "sales_angles": sales_angles_response["angles"]
+        }
+
+    async def resolve_brand_context(self, request: BrandContextResolverRequest):
+        """Run the brand and context agents concurrently and merge their lists."""
+        brand_agent_task = self.handle_message_json(MessageRequest(
+            agent_id="store_brand_agent",
+            conversation_id="",
+            query="store_brand_agent",
+            parameter_prompt=request.prompt,
+            json_parser={"brands": ["string", "string"]}
+        ))
+
+        context_agent_task = self.handle_message_json(MessageRequest(
+            agent_id="store_context_agent",
+            conversation_id="",
+            query="store_context_agent",
+            parameter_prompt=request.prompt,
+            json_parser={"contexts": ["string", "string"]}
+        ))
+
+        # Both agents run in parallel
+        responses = await asyncio.gather(brand_agent_task, context_agent_task)
+
+        brands = responses[0].get("brands", [])
+        contexts = responses[1].get("contexts", [])
-        return RecommendProductResponse(ai_response=json_data, products=aliexpress_data.get_products())
+        return {
+            "brands": brands,
+            "contexts": contexts
+        }
diff --git a/app/services/message_service_interface.py b/app/services/message_service_interface.py
index 4423d33..6a940ca 100644
--- a/app/services/message_service_interface.py
+++ b/app/services/message_service_interface.py
@@ -1,7 +1,11 @@
from abc import abstractmethod, ABC
+from app.requests.copy_request import CopyRequest
from app.requests.message_request import MessageRequest
from app.requests.recommend_product_request import RecommendProductRequest
+from app.requests.resolve_funnel_request import ResolveFunnelRequest
+from app.requests.brand_context_resolver_request import BrandContextResolverRequest
+from app.requests.generate_pdf_request import GeneratePdfRequest
class MessageServiceInterface(ABC):
@@ -9,6 +13,30 @@ class MessageServiceInterface(ABC):
async def handle_message(self, request: MessageRequest):
pass
+    @abstractmethod
+    async def handle_message_json(self, request: MessageRequest):
+        """Handle a message and return the reply parsed as JSON."""
+        pass
+
+    @abstractmethod
+    async def generate_copies(self, request: CopyRequest):
+        """Generate marketing copies for the given prompt."""
+        pass
+
@abstractmethod
async def recommend_products(self, request: RecommendProductRequest):
+ pass
+
+ @abstractmethod
+ async def generate_pdf(self, request: GeneratePdfRequest):
+ pass
+
+ @abstractmethod
+ async def resolve_funnel(self, request: ResolveFunnelRequest):
+ pass
+
+ @abstractmethod
+ async def resolve_brand_context(self, request: BrandContextResolverRequest):
+ pass
+
+ @abstractmethod
+ async def handle_message_with_config(self, request: MessageRequest):
pass
\ No newline at end of file
diff --git a/app/services/product_scraping_service.py b/app/services/product_scraping_service.py
new file mode 100644
index 0000000..c9c2367
--- /dev/null
+++ b/app/services/product_scraping_service.py
@@ -0,0 +1,32 @@
+from fastapi import Depends
+
+from app.requests.product_scraping_request import ProductScrapingRequest
+from app.services.product_scraping_service_interface import ProductScrapingServiceInterface
+from app.factories.scraping_factory import ScrapingFactory
+from urllib.parse import urlparse
+
+# Sentinel URL whose domain makes the factory select the scraper used for
+# direct-HTML scraping. NOTE(review): presumably any macys.com URL would do --
+# confirm against ScrapingFactory.get_scraper dispatch logic.
+_DIRECT_SCRAPE_DISPATCH_URL = "https://www.macys.com/shop/womens-clothing/accessories/womens-sunglasses/Upc_bops_purchasable,Productsperpage/5376,120?id=28295&_additionalStoreLocations=5376"
+
+
+class ProductScrapingService(ProductScrapingServiceInterface):
+    """Resolves the appropriate scraper for a product URL and delegates to it."""
+
+    def __init__(self, scraping_factory: ScrapingFactory = Depends()):
+        self.scraping_factory = scraping_factory
+
+    async def scrape_product(self, request: ProductScrapingRequest):
+        """Scrape the product at request.product_url via the factory-selected scraper."""
+        url = str(request.product_url)
+        domain = urlparse(url).netloc.lower()
+
+        scraper = self.scraping_factory.get_scraper(url, country=request.country)
+        return await scraper.scrape(url, domain)
+
+    async def scrape_direct(self, html):
+        """Scrape pre-fetched HTML using the scraper chosen by the sentinel URL."""
+        scraper = self.scraping_factory.get_scraper(_DIRECT_SCRAPE_DISPATCH_URL)
+
+        return await scraper.scrape_direct(html)
+
diff --git a/app/services/product_scraping_service_interface.py b/app/services/product_scraping_service_interface.py
new file mode 100644
index 0000000..864bced
--- /dev/null
+++ b/app/services/product_scraping_service_interface.py
@@ -0,0 +1,16 @@
+from abc import ABC, abstractmethod
+from app.requests.product_scraping_request import ProductScrapingRequest
+
+
+class ProductScrapingServiceInterface(ABC):
+    """Contract for services that scrape product data."""
+
+    @abstractmethod
+    async def scrape_product(self, request: ProductScrapingRequest):
+        """Scrape the product referenced by the request; must be implemented."""
+        pass
+
+    async def scrape_direct(self, html):
+        """Optional hook: scrape already-fetched HTML. Kept non-abstract so
+        existing subclasses stay instantiable, but the base rejects use."""
+        raise NotImplementedError("scrape_direct is not implemented")
diff --git a/app/services/video_service.py b/app/services/video_service.py
new file mode 100644
index 0000000..df14cba
--- /dev/null
+++ b/app/services/video_service.py
@@ -0,0 +1,46 @@
+from typing import Any, Dict
+
+from fastapi import Depends, HTTPException
+
+from app.requests.generate_video_request import GenerateVideoRequest, VideoType
+from app.services.video_service_interface import VideoServiceInterface
+from app.externals.fal.fal_client import FalClient
+
+
+class VideoService(VideoServiceInterface):
+    """Generates videos by routing each request type to the FAL client."""
+
+    def __init__(self, fal_client: FalClient = Depends()):
+        self.fal_client = fal_client
+
+    async def generate_video(self, request: GenerateVideoRequest) -> Dict[str, Any]:
+        """Generate a video; raises 400 on bad input, 502 on FAL failures."""
+        content: Dict[str, Any] = request.content or {}
+
+        try:
+            if request.type == VideoType.animated_scene:
+                prompt = content.get("prompt")
+                image_url = content.get("image_url")
+                if not prompt or not image_url:
+                    raise HTTPException(status_code=400, detail="Se requieren 'prompt' e 'image_url' en content para animated_scene")
+                fal_webhook = content.get("fal_webhook")
+                # Any additional content fields are forwarded untouched to FAL.
+                extra = {k: v for k, v in content.items() if k not in {"prompt", "image_url", "fal_webhook"}}
+                return await self.fal_client.kling_image_to_video(prompt=prompt, image_url=image_url, fal_webhook=fal_webhook, **extra)
+
+            if request.type == VideoType.human_scene:
+                image_url = content.get("image_url")
+                audio_url = content.get("audio_url")
+                if not image_url or not audio_url:
+                    raise HTTPException(status_code=400, detail="Se requieren 'image_url' y 'audio_url' en content para human_scene")
+                fal_webhook = content.get("fal_webhook")
+                extra = {k: v for k, v in content.items() if k not in {"image_url", "audio_url", "fal_webhook"}}
+                return await self.fal_client.bytedance_omnihuman(image_url=image_url, audio_url=audio_url, fal_webhook=fal_webhook, **extra)
+
+            raise HTTPException(status_code=400, detail="Tipo de video no soportado")
+        except HTTPException:
+            # Re-raise our own HTTP errors unchanged.
+            raise
+        except Exception as e:
+            # Any other failure from the FAL client surfaces as a 502.
+            raise HTTPException(status_code=502, detail=f"Error al llamar a FAL: {str(e)}")
diff --git a/app/services/video_service_interface.py b/app/services/video_service_interface.py
new file mode 100644
index 0000000..6632014
--- /dev/null
+++ b/app/services/video_service_interface.py
@@ -0,0 +1,12 @@
+from abc import ABC, abstractmethod
+
+from app.requests.generate_video_request import GenerateVideoRequest
+
+
+class VideoServiceInterface(ABC):
+    """Contract for services that generate videos."""
+
+    @abstractmethod
+    async def generate_video(self, request: GenerateVideoRequest):
+        """Generate a video for the given request; must be implemented."""
+        pass
diff --git a/main.py b/main.py
index 4ad61d6..a4c32bd 100644
--- a/main.py
+++ b/main.py
@@ -3,19 +3,35 @@
from app.controllers.handle_controller import router
from app.managers.conversation_manager import ConversationManager
from app.managers.conversation_manager_interface import ConversationManagerInterface
+from app.services.image_service import ImageService
+from app.services.image_service_interface import ImageServiceInterface
from app.services.message_service import MessageService
from app.services.message_service_interface import MessageServiceInterface
+from app.services.product_scraping_service import ProductScrapingService
+from app.services.product_scraping_service_interface import ProductScrapingServiceInterface
+from app.services.video_service import VideoService
+from app.services.video_service_interface import VideoServiceInterface
+from app.services.audio_service import AudioService
+from app.services.audio_service_interface import AudioServiceInterface
app = FastAPI(
title="Conversational Agent API",
description="API for agent ai",
version="1.0.0"
)
+
app.include_router(router)
+
+conversation_manager_singleton = ConversationManager()
+
app.dependency_overrides[MessageServiceInterface] = MessageService
-app.dependency_overrides[ConversationManagerInterface] = ConversationManager
+app.dependency_overrides[ConversationManagerInterface] = lambda: conversation_manager_singleton
+app.dependency_overrides[ImageServiceInterface] = ImageService
+app.dependency_overrides[ProductScrapingServiceInterface] = ProductScrapingService
+app.dependency_overrides[VideoServiceInterface] = VideoService
+app.dependency_overrides[AudioServiceInterface] = AudioService
if __name__ == "__main__":
import uvicorn
- uvicorn.run(app, host="0.0.0.0", port=8000)
\ No newline at end of file
+ uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/requirements.txt b/requirements.txt
index f3c1f62..9d33655 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,13 +1,23 @@
-fastapi==0.109.1
-pydantic==1.10.13
+fastapi>=0.109.1
+pydantic>=2.5.0
mangum==0.17.0
python-dotenv==1.0.0
uvicorn==0.24.0
-httpx
+httpx>=0.24.0
langchain-community>=0.2.0
-langchain-openai
+langchain-openai>=0.0.5
openai
-langgraph>=0.0.10
+langgraph==0.3.31
langchain-core>=0.1.17
langchain-anthropic
-langchain-ollama
\ No newline at end of file
+langchain-ollama
+fpdf
+beautifulsoup4
+lxml
+langchain_mcp
+langchain-mcp-adapters==0.0.9
+langchain-google-genai
+Pillow==10.3.0
+html5lib
+requests
+langsmith