Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
409 changes: 409 additions & 0 deletions INTEGRATION_SUMMARY.md

Large diffs are not rendered by default.

14 changes: 13 additions & 1 deletion dlplus/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,15 @@
__author__ = "خليف 'ذيبان' العنزي"

# Core components
from .core import DLPlusCore, ArabicProcessor, ContextAnalyzer
from .core import (
DLPlusCore,
ArabicProcessor,
ContextAnalyzer,
ModelManager,
ModelStatus,
IntegrationBridge,
ExecutionMode
)

# API components
from .api import FastAPIConnector, InternalExecutionAPI
Expand All @@ -24,6 +32,10 @@
'DLPlusCore',
'ArabicProcessor',
'ContextAnalyzer',
'ModelManager',
'ModelStatus',
'IntegrationBridge',
'ExecutionMode',
'FastAPIConnector',
'InternalExecutionAPI',
'Settings',
Expand Down
53 changes: 51 additions & 2 deletions dlplus/agents/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

import logging
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
from typing import Dict, Any, Optional, List
from datetime import datetime

logger = logging.getLogger(__name__)
Expand All @@ -34,6 +34,8 @@ def __init__(self, name: str, config: Optional[Dict[str, Any]] = None):
self.enabled = True
self.execution_count = 0
self.last_execution = None
self.model_manager = None # Will be set by integration bridge
self.preferred_models = [] # Preferred models for this agent

logger.info(f"🤖 Agent '{name}' initialized")

Expand Down Expand Up @@ -112,8 +114,55 @@ def get_status(self) -> Dict[str, Any]:
'enabled': self.enabled,
'execution_count': self.execution_count,
'last_execution': self.last_execution,
'config': self.config
'config': self.config,
'has_model_manager': self.model_manager is not None,
'preferred_models': self.preferred_models
}

def set_model_manager(self, model_manager: Any):
    """
    Wire this agent to the shared model manager.

    After this call, use_model() can forward inference requests to the
    manager instead of failing with a "no manager" error.

    Args:
        model_manager: ModelManager instance
    """
    self.model_manager = model_manager
    logger.info(f"🔗 Model Manager connected to agent '{self.name}'")

def set_preferred_models(self, models: List[str]):
    """
    Record the ordered list of model IDs this agent should try first.

    Args:
        models: List of model IDs
    """
    self.preferred_models = models
    logger.info(f"📋 Agent '{self.name}' preferred models: {models}")

async def use_model(
    self,
    model_id: str,
    input_text: str,
    parameters: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Run inference on an AI model through the attached model manager.

    Args:
        model_id: Model identifier
        input_text: Input text for the model
        parameters: Optional inference parameters

    Returns:
        The manager's inference result dict; when no manager has been
        attached via set_model_manager(), a structured failure dict
        (success=False plus an error message) instead of an exception.
    """
    manager = self.model_manager
    # Guard clause: without a manager there is nothing to delegate to.
    if not manager:
        return {
            'success': False,
            'error': 'No model manager connected to this agent'
        }
    return await manager.inference(model_id, input_text, parameters)

def __repr__(self) -> str:
    """Debug-friendly summary: concrete class, agent name, enabled flag."""
    return "<{} name='{}' enabled={}>".format(
        type(self).__name__, self.name, self.enabled
    )
51 changes: 45 additions & 6 deletions dlplus/agents/code_generator_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ def __init__(self, config: Dict[str, Any] = None):
'rust', 'typescript', 'php', 'ruby', 'swift'
]
self.default_language = 'python'
# Prefer DeepSeek for code generation, fallback to LLaMA 3
self.set_preferred_models(['deepseek', 'llama3', 'mistral'])

async def execute(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
Expand Down Expand Up @@ -78,13 +80,33 @@ async def _generate_code(
"""
Generate the actual code

In production, this would use:
- OpenAI Codex
- GitHub Copilot API
- DeepSeek Coder
- Or other code generation models
Uses AI models if available, otherwise falls back to templates.
"""
# Template-based generation for demonstration
# Try to use AI model if available
if self.model_manager:
for model_id in self.preferred_models:
try:
# Create prompt for code generation
prompt = self._create_code_generation_prompt(
description, language, requirements
)

# Use model
result = await self.use_model(model_id, prompt, {
'max_length': 2048,
'temperature': 0.7
})

if result.get('success'):
logger.info(f"✅ Code generated using model '{model_id}'")
return result.get('output', '')

except Exception as e:
logger.warning(f"⚠️ Model '{model_id}' failed: {e}, trying next...")
continue

# Fallback to template-based generation
logger.info("📝 Using template-based code generation")
templates = {
'python': self._generate_python_template,
'javascript': self._generate_javascript_template,
Expand All @@ -94,6 +116,23 @@ async def _generate_code(
generator = templates.get(language, self._generate_generic_template)
return generator(description, requirements)

def _create_code_generation_prompt(
    self,
    description: str,
    language: str,
    requirements: List[str]
) -> str:
    """
    Build the instruction prompt sent to a code-generation model.

    Args:
        description: Natural-language task description
        language: Target programming language
        requirements: Extra constraints; rendered as 'None' when empty

    Returns:
        The fully formatted prompt string.
    """
    requirements_text = ', '.join(requirements) if requirements else 'None'
    # Parts concatenate to the exact layout the models are prompted with.
    return (
        f"Generate {language} code for the following task:\n"
        "\n"
        f"Task: {description}\n"
        "\n"
        f"Requirements: {requirements_text}\n"
        "\n"
        "Please provide clean, well-commented code following best practices."
    )

def _generate_python_template(
self,
description: str,
Expand Down
45 changes: 42 additions & 3 deletions dlplus/agents/web_retrieval_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ def __init__(self, config: Dict[str, Any] = None):
super().__init__("Web Retrieval Agent", config)
self.max_results = config.get('max_results', 10) if config else 10
self.timeout = config.get('timeout', 30) if config else 30
# Prefer Arabic-understanding models for query enhancement
self.set_preferred_models(['arabert', 'camelbert', 'qwen_arabic'])

async def execute(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
Expand All @@ -45,17 +47,54 @@ async def execute(self, task: Dict[str, Any]) -> Dict[str, Any]:

logger.info(f"🔍 Searching for: {query}")

# In production, this would use actual search API
# For now, return simulated results
results = await self._search(query)
# Enhance query using AI models if available
enhanced_query = await self._enhance_query_with_ai(query)

# Perform search
results = await self._search(enhanced_query)

return {
'success': True,
'query': query,
'enhanced_query': enhanced_query,
'results': results,
'count': len(results)
}

async def _enhance_query_with_ai(self, query: str) -> str:
    """
    Enhance a search query using the attached AI models.

    Args:
        query: Original search query

    Returns:
        Enhanced query text, or the original query unchanged when no
        model manager is attached or every preferred model fails.
    """
    # Without a manager there is nothing to enhance with.
    if not self.model_manager:
        return query

    # The prompt only depends on the query, so build it once for all models.
    prompt = f"""تحليل وتحسين استعلام البحث التالي:

الاستعلام: {query}

قم بتحليل الاستعلام وتحسينه لنتائج بحث أفضل. اذكر الكلمات المفتاحية المهمة والمفاهيم ذات الصلة."""

    # Walk the preferred (Arabic-capable) models in priority order; a
    # failure in any model is logged and the next one is tried.
    for model_id in self.preferred_models:
        try:
            result = await self.use_model(model_id, prompt)

            if result.get('success'):
                logger.info(f"✅ Query enhanced using model '{model_id}'")
                return result.get('output', query)

        except Exception as e:
            logger.warning(f"⚠️ Model '{model_id}' failed: {e}")
            continue

    # All models failed or declined — fall back to the raw query.
    return query

async def _search(self, query: str) -> list:
"""
Perform the actual search
Expand Down
12 changes: 11 additions & 1 deletion dlplus/core/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,15 @@
from .intelligence_core import DLPlusCore
from .arabic_processor import ArabicProcessor
from .context_analyzer import ContextAnalyzer
from .model_manager import ModelManager, ModelStatus
from .integration_bridge import IntegrationBridge, ExecutionMode

__all__ = ['DLPlusCore', 'ArabicProcessor', 'ContextAnalyzer']
# Explicit public API of dlplus.core — keep in sync with the re-exports above.
__all__ = [
    'DLPlusCore',
    'ArabicProcessor',
    'ContextAnalyzer',
    'ModelManager',
    'ModelStatus',
    'IntegrationBridge',
    'ExecutionMode'
]
Loading