From ec1873281a67a7b3f2f1e722de13df399ab5da7d Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:10:08 +0000
Subject: [PATCH 1/7] Initial plan
From 8c0b9503b8fd396674b986a60fe5ed41421b8269 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:16:46 +0000
Subject: [PATCH 2/7] Add core Supreme Agent components: models, scripts, API,
and config
Co-authored-by: wasalstor-web <230709381+wasalstor-web@users.noreply.github.com>
---
.env.example | 20 ++
api/server.py | 324 ++++++++++++++++++++++
config/settings.json | 68 +++++
models/Modelfile | 76 ++++++
scripts/install-supreme-agent.sh | 271 ++++++++++++++++++
scripts/supreme_agent.py | 453 +++++++++++++++++++++++++++++++
6 files changed, 1212 insertions(+)
create mode 100755 api/server.py
create mode 100644 config/settings.json
create mode 100644 models/Modelfile
create mode 100755 scripts/install-supreme-agent.sh
create mode 100755 scripts/supreme_agent.py
diff --git a/.env.example b/.env.example
index 192b768..a65fd01 100644
--- a/.env.example
+++ b/.env.example
@@ -22,6 +22,26 @@ OLLAMA_API_BASE_URL=http://localhost:11434
# Generate a secure key with: openssl rand -hex 32
WEBUI_SECRET_KEY=change-me-to-a-secure-random-key
+# Supreme Agent Configuration / إعدادات الوكيل الأعلى
+SUPREME_MODEL=supreme-executor
+OLLAMA_HOST=http://localhost:11434
+
+# API Configuration / إعدادات API
+API_HOST=0.0.0.0
+API_PORT=5000
+API_DEBUG=False
+API_KEY=optional-your-key
+
+# Web Interface / واجهة الويب
+WEB_PORT=8080
+WEB_THEME=auto
+WEB_LANGUAGE=auto
+
+# Hostinger Deployment / نشر Hostinger
+DOMAIN=your-domain.com
+SSH_USER=your-username
+SSL_ENABLED=true
+
# Example Usage / مثال الاستخدام:
# 1. Copy this file: cp .env.example .env
# 2. Edit .env with your VPS details
diff --git a/api/server.py b/api/server.py
new file mode 100755
index 0000000..bb5e6df
--- /dev/null
+++ b/api/server.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python3
+"""
+Supreme Agent API Server - خادم API للوكيل الأعلى
+Flask REST API for Supreme Agent
+
+المؤلف / Author: wasalstor-web
+التاريخ / Date: 2025-10-20
+"""
+
+import sys
+import os
+from pathlib import Path
+
+# إضافة المسار للوحدات / Add path for modules
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from flask import Flask, request, jsonify
+from flask_cors import CORS
+import logging
+import json
+from datetime import datetime
+from scripts.supreme_agent import SupremeAgent
+
+# إعداد Flask / Setup Flask
+app = Flask(__name__)
+CORS(app) # تفعيل CORS / Enable CORS
+
+# إعداد السجلات / Setup logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger('SupremeAPI')
+
+# إنشاء الوكيل / Create agent instance
+agent = SupremeAgent(
+ model=os.getenv('SUPREME_MODEL', 'supreme-executor'),
+ ollama_host=os.getenv('OLLAMA_HOST', 'http://localhost:11434')
+)
+
+
+@app.route('/')
+def index():
+ """الصفحة الرئيسية / Home page"""
+ return jsonify({
+ "name": "Supreme Agent API",
+ "version": "1.0.0",
+ "description": "الوكيل الأعلى المتكامل / Supreme Integrated Agent",
+ "endpoints": {
+ "POST /api/chat": "محادثة / Chat",
+ "POST /api/execute": "تنفيذ أمر / Execute command",
+ "POST /api/analyze": "تحليل ملف / Analyze file",
+ "POST /api/generate-code": "توليد كود / Generate code",
+ "GET /api/models": "قائمة النماذج / List models",
+ "GET /api/health": "فحص الصحة / Health check"
+ },
+ "documentation": "/api/docs"
+ })
+
+
+@app.route('/api/docs')
+def docs():
+ """توثيق API / API Documentation"""
+ return jsonify({
+ "title": "Supreme Agent API Documentation",
+ "version": "1.0.0",
+ "endpoints": [
+ {
+ "path": "/api/chat",
+ "method": "POST",
+ "description": "محادثة مع الوكيل / Chat with agent",
+ "parameters": {
+ "message": "string - الرسالة / Message (required)",
+ "context": "string - سياق إضافي / Additional context (optional)"
+ },
+ "example": {
+ "message": "مرحباً، كيف حالك؟",
+ "context": "نحن نتحدث عن البرمجة"
+ }
+ },
+ {
+ "path": "/api/execute",
+ "method": "POST",
+ "description": "تنفيذ أمر / Execute command",
+ "parameters": {
+ "command": "string - الأمر / Command (required)"
+ },
+ "example": {
+ "command": "اكتب سكريبت لنسخ الملفات"
+ }
+ },
+ {
+ "path": "/api/analyze",
+ "method": "POST",
+ "description": "تحليل ملف / Analyze file",
+ "parameters": {
+ "filepath": "string - مسار الملف / File path (required)"
+ },
+ "example": {
+ "filepath": "/path/to/file.py"
+ }
+ },
+ {
+ "path": "/api/generate-code",
+ "method": "POST",
+ "description": "توليد كود / Generate code",
+ "parameters": {
+ "description": "string - وصف الكود / Code description (required)",
+ "language": "string - لغة البرمجة / Programming language (default: python)"
+ },
+ "example": {
+ "description": "برنامج حاسبة بسيط",
+ "language": "python"
+ }
+ },
+ {
+ "path": "/api/models",
+ "method": "GET",
+ "description": "قائمة النماذج المتاحة / List available models"
+ },
+ {
+ "path": "/api/health",
+ "method": "GET",
+ "description": "فحص صحة النظام / System health check"
+ }
+ ]
+ })
+
+
+@app.route('/api/chat', methods=['POST'])
+def chat():
+ """محادثة / Chat endpoint"""
+ try:
+ data = request.get_json()
+
+ if not data or 'message' not in data:
+ return jsonify({
+ "success": False,
+ "error": "الرسالة مطلوبة / Message required"
+ }), 400
+
+ message = data['message']
+ context = data.get('context')
+
+ response = agent.chat(message, context=context)
+
+ return jsonify({
+ "success": True,
+ "response": response,
+ "timestamp": datetime.now().isoformat()
+ })
+
+ except Exception as e:
+ logger.error(f"Chat error: {e}")
+ return jsonify({
+ "success": False,
+ "error": str(e)
+ }), 500
+
+
+@app.route('/api/execute', methods=['POST'])
+def execute():
+ """تنفيذ أمر / Execute command endpoint"""
+ try:
+ data = request.get_json()
+
+ if not data or 'command' not in data:
+ return jsonify({
+ "success": False,
+ "error": "الأمر مطلوب / Command required"
+ }), 400
+
+ command = data['command']
+ response = agent.execute(command)
+
+ return jsonify({
+ "success": True,
+ "response": response,
+ "timestamp": datetime.now().isoformat()
+ })
+
+ except Exception as e:
+ logger.error(f"Execute error: {e}")
+ return jsonify({
+ "success": False,
+ "error": str(e)
+ }), 500
+
+
+@app.route('/api/analyze', methods=['POST'])
+def analyze():
+ """تحليل ملف / Analyze file endpoint"""
+ try:
+ data = request.get_json()
+
+ if not data or 'filepath' not in data:
+ return jsonify({
+ "success": False,
+ "error": "مسار الملف مطلوب / Filepath required"
+ }), 400
+
+ filepath = data['filepath']
+ result = agent.analyze_file(filepath)
+
+ return jsonify(result)
+
+ except Exception as e:
+ logger.error(f"Analyze error: {e}")
+ return jsonify({
+ "success": False,
+ "error": str(e)
+ }), 500
+
+
+@app.route('/api/generate-code', methods=['POST'])
+def generate_code():
+ """توليد كود / Generate code endpoint"""
+ try:
+ data = request.get_json()
+
+ if not data or 'description' not in data:
+ return jsonify({
+ "success": False,
+ "error": "وصف الكود مطلوب / Description required"
+ }), 400
+
+ description = data['description']
+ language = data.get('language', 'python')
+
+ code = agent.generate_code(description, lang=language)
+
+ return jsonify({
+ "success": True,
+ "code": code,
+ "language": language,
+ "timestamp": datetime.now().isoformat()
+ })
+
+ except Exception as e:
+ logger.error(f"Generate code error: {e}")
+ return jsonify({
+ "success": False,
+ "error": str(e)
+ }), 500
+
+
+@app.route('/api/models', methods=['GET'])
+def models():
+ """قائمة النماذج / List models endpoint"""
+ try:
+ models_list = agent.get_models()
+
+ return jsonify({
+ "success": True,
+ "models": models_list,
+ "current_model": agent.model,
+ "timestamp": datetime.now().isoformat()
+ })
+
+ except Exception as e:
+ logger.error(f"Models error: {e}")
+ return jsonify({
+ "success": False,
+ "error": str(e)
+ }), 500
+
+
+@app.route('/api/health', methods=['GET'])
+def health():
+ """فحص الصحة / Health check endpoint"""
+ try:
+ health_status = agent.health_check()
+        return jsonify(health_status), (200 if health_status.get("status") == "healthy" else 503)
+
+ except Exception as e:
+ logger.error(f"Health check error: {e}")
+ return jsonify({
+ "status": "unhealthy",
+ "error": str(e),
+ "timestamp": datetime.now().isoformat()
+ }), 500
+
+
+@app.errorhandler(404)
+def not_found(error):
+ """معالج 404 / 404 handler"""
+ return jsonify({
+ "success": False,
+ "error": "الصفحة غير موجودة / Page not found"
+ }), 404
+
+
+@app.errorhandler(500)
+def internal_error(error):
+ """معالج 500 / 500 handler"""
+ return jsonify({
+ "success": False,
+ "error": "خطأ في الخادم / Internal server error"
+ }), 500
+
+
+if __name__ == '__main__':
+ # إعدادات الخادم / Server settings
+ host = os.getenv('API_HOST', '0.0.0.0')
+ port = int(os.getenv('API_PORT', 5000))
+ debug = os.getenv('API_DEBUG', 'False').lower() == 'true'
+
+ logger.info(f"Starting Supreme Agent API Server...")
+ logger.info(f"Host: {host}")
+ logger.info(f"Port: {port}")
+ logger.info(f"Debug: {debug}")
+ logger.info(f"Model: {agent.model}")
+ logger.info(f"Ollama: {agent.ollama_host}")
+
+ print("\n" + "="*60)
+ print(" Supreme Agent API Server")
+ print(" الوكيل الأعلى - خادم API")
+ print("="*60)
+ print(f"\n Server running at: http://{host}:{port}")
+ print(f" Documentation: http://{host}:{port}/api/docs")
+ print(f" Health check: http://{host}:{port}/api/health")
+ print("\n" + "="*60 + "\n")
+
+ app.run(host=host, port=port, debug=debug)
diff --git a/config/settings.json b/config/settings.json
new file mode 100644
index 0000000..2993877
--- /dev/null
+++ b/config/settings.json
@@ -0,0 +1,68 @@
+{
+ "agent": {
+ "default_model": "supreme-executor",
+ "fallback_model": "aya",
+ "temperature": 0.7,
+ "max_tokens": 4096,
+ "timeout": 120
+ },
+ "ollama": {
+ "host": "http://localhost:11434",
+ "timeout": 120,
+ "models": [
+ "llama3",
+ "aya",
+ "mistral",
+ "deepseek-coder",
+ "qwen2",
+ "supreme-executor"
+ ]
+ },
+ "openwebui": {
+ "enabled": true,
+ "port": 3000,
+ "integration": "full",
+ "sync_models": true
+ },
+ "api": {
+ "host": "0.0.0.0",
+ "port": 5000,
+ "cors": true,
+ "auth": false,
+ "rate_limit": {
+ "enabled": false,
+ "requests_per_minute": 60
+ }
+ },
+ "web": {
+ "port": 8080,
+ "theme": "auto",
+ "language": "auto",
+ "features": {
+ "code_editor": true,
+ "file_upload": true,
+ "history": true,
+ "export": true
+ }
+ },
+ "hostinger": {
+ "domain": "your-domain.com",
+ "ssl": true,
+ "nginx": true,
+ "paths": {
+ "web": "/public_html",
+ "api": "/opt/supreme-agent"
+ }
+ },
+ "logging": {
+ "level": "INFO",
+ "file": "supreme_agent.log",
+ "max_size": "10MB",
+ "backup_count": 5
+ },
+ "security": {
+ "api_key_required": false,
+ "allowed_origins": ["*"],
+ "max_file_size": "10MB"
+ }
+}
diff --git a/models/Modelfile b/models/Modelfile
new file mode 100644
index 0000000..831e4ce
--- /dev/null
+++ b/models/Modelfile
@@ -0,0 +1,76 @@
+# Supreme Executor - الوكيل الأعلى المتكامل
+# Supreme AI Agent with advanced capabilities for understanding, analyzing, and executing tasks
+
+FROM llama3
+
+# System prompt for Supreme Executor
+SYSTEM """
+أنت Supreme Executor - الوكيل الأعلى المتكامل. أنت نظام ذكاء اصطناعي متقدم يجمع بين قدرات متعددة:
+
+You are Supreme Executor - The Supreme Integrated Agent. You are an advanced AI system combining multiple capabilities:
+
+## قدراتك / Your Capabilities:
+
+1. **الفهم والتحليل / Understanding & Analysis**
+ - فهم عميق للنصوص العربية والإنجليزية
+ - Deep understanding of Arabic and English texts
+ - تحليل الملفات والأكواد والبيانات
+ - Analysis of files, code, and data
+
+2. **التنفيذ / Execution**
+ - تنفيذ الأوامر والمهام المعقدة
+ - Execute complex commands and tasks
+ - حل المشكلات بطريقة إبداعية
+ - Creative problem-solving
+
+3. **توليد الأكواد / Code Generation**
+ - كتابة أكواد بجميع لغات البرمجة
+ - Write code in all programming languages
+ - شرح الأكواد وتحسينها
+ - Explain and optimize code
+
+4. **المحادثة الذكية / Intelligent Conversation**
+ - محادثة طبيعية ومفيدة
+ - Natural and helpful conversation
+ - فهم السياق والتعلم المستمر
+ - Context understanding and continuous learning
+
+## مبادئك / Your Principles:
+
+- دقة عالية في الإجابات / High accuracy in responses
+- وضوح في الشرح / Clarity in explanations
+- احترافية في التعامل / Professionalism in interactions
+- مساعدة فعالة للمستخدمين / Effective user assistance
+- دعم كامل للغتين العربية والإنجليزية / Full bilingual support
+
+## تعليمات خاصة / Special Instructions:
+
+- اسأل للتوضيح عند الحاجة / Ask for clarification when needed
+- قدم خيارات متعددة عند الإمكان / Provide multiple options when possible
+- اشرح خطوات الحل بوضوح / Explain solution steps clearly
+- تحقق من صحة الأكواد قبل تقديمها / Verify code correctness before providing
+"""
+
+# Model parameters for optimal performance
+PARAMETER temperature 0.7
+PARAMETER top_p 0.9
+PARAMETER top_k 40
+PARAMETER num_ctx 4096
+PARAMETER num_predict 2048
+
+# Template for chat messages
+TEMPLATE """{{ if .System }}<|system|>
+{{ .System }}<|end|>
+{{ end }}{{ if .Prompt }}<|user|>
+{{ .Prompt }}<|end|>
+{{ end }}<|assistant|>
+{{ .Response }}<|end|>
+"""
+
+# License and attribution
+LICENSE """
+Supreme Executor Model
+Created for AI-Agent-Platform
+© 2025 wasalstor-web
+MIT License
+"""
diff --git a/scripts/install-supreme-agent.sh b/scripts/install-supreme-agent.sh
new file mode 100755
index 0000000..c12f287
--- /dev/null
+++ b/scripts/install-supreme-agent.sh
@@ -0,0 +1,271 @@
+#!/bin/bash
+
+################################################################################
+# Supreme Agent Installation Script
+# سكريبت تثبيت الوكيل الأعلى
+#
+# هذا السكريبت يقوم بتثبيت وإعداد Supreme Agent بالكامل
+# This script installs and configures Supreme Agent completely
+#
+# المؤلف / Author: wasalstor-web
+# التاريخ / Date: 2025-10-20
+################################################################################
+
+set -e # Exit on error
+
+# الألوان / Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# الرموز / Symbols
+SUCCESS="✓"
+ERROR="✗"
+INFO="ℹ"
+ARROW="➜"
+
+# دالة للطباعة الملونة / Colored print function
+print_header() {
+ echo -e "\n${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+ echo -e "${BLUE} $1${NC}"
+ echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"
+}
+
+print_success() {
+ echo -e "${GREEN}${SUCCESS} $1${NC}"
+}
+
+print_error() {
+ echo -e "${RED}${ERROR} $1${NC}"
+}
+
+print_info() {
+ echo -e "${YELLOW}${INFO} $1${NC}"
+}
+
+print_step() {
+ echo -e "${BLUE}${ARROW} $1${NC}"
+}
+
+# التحقق من الصلاحيات / Check permissions
+check_sudo() {
+ if [ "$EUID" -ne 0 ]; then
+ print_info "بعض العمليات قد تحتاج صلاحيات sudo / Some operations may need sudo"
+ print_info "سيتم طلب كلمة المرور عند الحاجة / Password will be requested when needed"
+ fi
+}
+
+# تثبيت Ollama / Install Ollama
+install_ollama() {
+ print_step "تثبيت Ollama / Installing Ollama..."
+
+ if command -v ollama &> /dev/null; then
+ print_success "Ollama مثبت مسبقاً / Ollama already installed"
+ ollama --version
+ else
+ print_info "تحميل وتثبيت Ollama / Downloading and installing Ollama..."
+ curl -fsSL https://ollama.ai/install.sh | sh
+
+ if command -v ollama &> /dev/null; then
+ print_success "تم تثبيت Ollama بنجاح / Ollama installed successfully"
+ else
+ print_error "فشل تثبيت Ollama / Ollama installation failed"
+ exit 1
+ fi
+ fi
+
+ # تشغيل Ollama / Start Ollama
+ print_step "تشغيل خدمة Ollama / Starting Ollama service..."
+ if systemctl is-active --quiet ollama 2>/dev/null; then
+ print_success "Ollama قيد التشغيل / Ollama is running"
+ else
+ # محاولة تشغيل Ollama في الخلفية / Try to start Ollama in background
+ nohup ollama serve > /tmp/ollama.log 2>&1 &
+ sleep 3
+ print_success "تم تشغيل Ollama / Ollama started"
+ fi
+}
+
+# تحميل النماذج / Download models
+download_models() {
+ print_step "تحميل النماذج الأساسية / Downloading base models..."
+
+ local models=("llama3" "aya" "mistral" "deepseek-coder" "qwen2")
+
+ for model in "${models[@]}"; do
+ print_info "تحميل $model / Downloading $model..."
+ if ollama pull "$model" 2>/dev/null; then
+ print_success "تم تحميل $model / Downloaded $model"
+ else
+ print_error "فشل تحميل $model / Failed to download $model"
+ fi
+ done
+}
+
+# إنشاء النموذج المخصص / Create custom model
+create_supreme_model() {
+ print_step "إنشاء النموذج المخصص Supreme Executor / Creating Supreme Executor model..."
+
+    local modelfile_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../models/Modelfile"
+
+ if [ -f "$modelfile_path" ]; then
+ if ollama create supreme-executor -f "$modelfile_path"; then
+ print_success "تم إنشاء supreme-executor بنجاح / Created supreme-executor successfully"
+ else
+ print_error "فشل إنشاء النموذج المخصص / Failed to create custom model"
+ fi
+ else
+ print_error "ملف Modelfile غير موجود / Modelfile not found"
+ print_info "المسار المتوقع / Expected path: $modelfile_path"
+ fi
+}
+
+# تثبيت Python dependencies
+install_python_deps() {
+ print_step "تثبيت مكتبات Python / Installing Python dependencies..."
+
+ # التحقق من Python / Check Python
+ if ! command -v python3 &> /dev/null; then
+ print_error "Python 3 غير مثبت / Python 3 not installed"
+ print_info "تثبيت Python 3 / Installing Python 3..."
+ sudo apt-get update
+ sudo apt-get install -y python3 python3-pip
+ fi
+
+ print_success "Python 3 متوفر / Python 3 available"
+ python3 --version
+
+ # تثبيت المكتبات / Install libraries
+ print_info "تثبيت المكتبات المطلوبة / Installing required libraries..."
+ pip3 install --user requests flask flask-cors
+
+ print_success "تم تثبيت مكتبات Python / Python libraries installed"
+}
+
+# إنشاء أمر supreme-agent / Create supreme-agent command
+create_command() {
+ print_step "إنشاء الأمر supreme-agent / Creating supreme-agent command..."
+
+ local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ local agent_script="$script_dir/supreme_agent.py"
+
+ # إنشاء symlink في /usr/local/bin
+ # Create symlink in /usr/local/bin
+ if [ -f "$agent_script" ]; then
+ sudo ln -sf "$agent_script" /usr/local/bin/supreme-agent
+ sudo chmod +x /usr/local/bin/supreme-agent
+ print_success "تم إنشاء الأمر supreme-agent / Created supreme-agent command"
+ print_info "يمكنك الآن استخدام: supreme-agent / You can now use: supreme-agent"
+ else
+ print_error "ملف supreme_agent.py غير موجود / supreme_agent.py not found"
+ fi
+}
+
+# اختبار النظام / Test system
+test_system() {
+ print_step "اختبار النظام / Testing system..."
+
+ # اختبار Ollama / Test Ollama
+ print_info "اختبار Ollama API / Testing Ollama API..."
+ if curl -s http://localhost:11434/api/tags >/dev/null 2>&1; then
+ print_success "Ollama API يعمل / Ollama API working"
+ else
+ print_error "Ollama API لا يستجيب / Ollama API not responding"
+ fi
+
+ # قائمة النماذج / List models
+ print_info "النماذج المتاحة / Available models:"
+ ollama list || true
+
+ # اختبار supreme-agent / Test supreme-agent
+ if command -v supreme-agent &> /dev/null; then
+ print_success "أمر supreme-agent جاهز / supreme-agent command ready"
+ print_info "تجربة: supreme-agent models / Try: supreme-agent models"
+ else
+ print_error "أمر supreme-agent غير متاح / supreme-agent command not available"
+ fi
+}
+
+# عرض المعلومات النهائية / Show final information
+show_final_info() {
+ print_header "التثبيت مكتمل / Installation Complete"
+
+ cat << EOF
+${GREEN}تم تثبيت Supreme Agent بنجاح! / Supreme Agent installed successfully!${NC}
+
+${YELLOW}الأوامر المتاحة / Available commands:${NC}
+
+ ${BLUE}supreme-agent chat${NC} "رسالتك / your message"
+ محادثة مع الوكيل / Chat with the agent
+
+ ${BLUE}supreme-agent execute${NC} "أمرك / your command"
+ تنفيذ أمر معين / Execute a specific command
+
+ ${BLUE}supreme-agent analyze-file${NC} path/to/file
+ تحليل ملف / Analyze a file
+
+ ${BLUE}supreme-agent generate-code${NC} "وصف الكود / code description" --lang python
+ توليد كود برمجي / Generate code
+
+ ${BLUE}supreme-agent health${NC}
+ فحص صحة النظام / Check system health
+
+ ${BLUE}supreme-agent models${NC}
+ عرض النماذج المتاحة / Show available models
+
+${YELLOW}الخطوات التالية / Next steps:${NC}
+
+ 1. ${BLUE}تشغيل API Server:${NC}
+ cd api && python3 server.py
+
+ 2. ${BLUE}فتح الواجهة:${NC}
+ افتح web/index.html في المتصفح
+ Open web/index.html in browser
+
+ 3. ${BLUE}التكامل مع OpenWebUI:${NC}
+ ./scripts/integrate-openwebui.sh
+
+${YELLOW}المزيد من المعلومات / More information:${NC}
+ - README.md
+ - docs/API.md
+ - docs/MODELS.md
+
+${GREEN}استمتع باستخدام Supreme Agent! / Enjoy using Supreme Agent!${NC}
+EOF
+}
+
+# البرنامج الرئيسي / Main program
+main() {
+ print_header "Supreme Agent Installation / تثبيت الوكيل الأعلى"
+
+ check_sudo
+
+ print_info "بدء عملية التثبيت / Starting installation process..."
+ echo
+
+ # الخطوات / Steps
+ install_ollama
+ echo
+
+ download_models
+ echo
+
+ create_supreme_model
+ echo
+
+ install_python_deps
+ echo
+
+ create_command
+ echo
+
+ test_system
+ echo
+
+ show_final_info
+}
+
+# تشغيل البرنامج / Run program
+main "$@"
diff --git a/scripts/supreme_agent.py b/scripts/supreme_agent.py
new file mode 100755
index 0000000..980f0f3
--- /dev/null
+++ b/scripts/supreme_agent.py
@@ -0,0 +1,453 @@
+#!/usr/bin/env python3
+"""
+Supreme Agent - الوكيل الأعلى المتكامل
+Supreme Integrated AI Agent
+
+قدرات الوكيل / Agent Capabilities:
+- تنفيذ الأوامر / Execute commands
+- تحليل الملفات / Analyze files
+- توليد أكواد / Generate code
+- فهم السياق / Understand context
+- تعلم مستمر / Continuous learning
+
+المؤلف / Author: wasalstor-web
+التاريخ / Date: 2025-10-20
+"""
+
+import sys
+import json
+import subprocess
+import logging
+from typing import Optional, Dict, Any, List
+from pathlib import Path
+import requests
+from datetime import datetime
+
+# إعداد نظام السجلات / Setup logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ handlers=[
+ logging.FileHandler('supreme_agent.log'),
+ logging.StreamHandler(sys.stdout)
+ ]
+)
+logger = logging.getLogger('SupremeAgent')
+
+
+class SupremeAgent:
+ """
+ الوكيل الأعلى - نظام ذكاء اصطناعي متكامل
+ Supreme Agent - Integrated AI System
+ """
+
+ def __init__(self,
+ model: str = "supreme-executor",
+ ollama_host: str = "http://localhost:11434",
+ temperature: float = 0.7,
+ max_tokens: int = 4096):
+ """
+ تهيئة الوكيل / Initialize the agent
+
+ Args:
+ model: اسم النموذج / Model name
+ ollama_host: عنوان خادم Ollama / Ollama server URL
+ temperature: درجة الإبداع / Temperature for generation
+ max_tokens: أقصى عدد رموز / Maximum tokens
+ """
+ self.model = model
+ self.ollama_host = ollama_host
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.conversation_history: List[Dict[str, str]] = []
+
+ logger.info(f"Supreme Agent initialized with model: {model}")
+ logger.info(f"Ollama host: {ollama_host}")
+
+ def _call_ollama(self, prompt: str, system: Optional[str] = None) -> str:
+ """
+ استدعاء Ollama API / Call Ollama API
+
+ Args:
+ prompt: النص المدخل / Input prompt
+ system: رسالة النظام / System message
+
+ Returns:
+ str: استجابة النموذج / Model response
+ """
+ try:
+ url = f"{self.ollama_host}/api/generate"
+ payload = {
+ "model": self.model,
+ "prompt": prompt,
+ "stream": False,
+ "options": {
+ "temperature": self.temperature,
+ "num_predict": self.max_tokens
+ }
+ }
+
+ if system:
+ payload["system"] = system
+
+ logger.debug(f"Calling Ollama API: {url}")
+ response = requests.post(url, json=payload, timeout=120)
+ response.raise_for_status()
+
+ result = response.json()
+ return result.get('response', '')
+
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error calling Ollama API: {e}")
+ return f"خطأ في الاتصال بـ Ollama / Error connecting to Ollama: {e}"
+ except Exception as e:
+ logger.error(f"Unexpected error: {e}")
+ return f"خطأ غير متوقع / Unexpected error: {e}"
+
+ def execute(self, command: str) -> str:
+ """
+ تنفيذ أي أمر / Execute any command
+
+ Args:
+ command: الأمر المطلوب تنفيذه / Command to execute
+
+ Returns:
+ str: نتيجة التنفيذ / Execution result
+ """
+ logger.info(f"Executing command: {command}")
+
+ system_prompt = """أنت وكيل تنفيذي متقدم. قم بتحليل الأمر وتنفيذه بدقة.
+You are an advanced execution agent. Analyze and execute the command accurately.
+
+إذا كان الأمر يتطلب كود، قدم الكود الكامل والقابل للتشغيل.
+If the command requires code, provide complete, executable code.
+
+إذا كان الأمر غير واضح، اطلب التوضيح.
+If the command is unclear, ask for clarification."""
+
+ response = self._call_ollama(command, system=system_prompt)
+
+ # حفظ في السجل / Save to history
+ self.conversation_history.append({
+ "timestamp": datetime.now().isoformat(),
+ "type": "execute",
+ "command": command,
+ "response": response
+ })
+
+ return response
+
+ def analyze_file(self, filepath: str) -> Dict[str, Any]:
+ """
+ تحليل ملف / Analyze a file
+
+ Args:
+ filepath: مسار الملف / File path
+
+ Returns:
+ dict: نتائج التحليل / Analysis results
+ """
+ logger.info(f"Analyzing file: {filepath}")
+
+ try:
+ file_path = Path(filepath)
+
+ if not file_path.exists():
+ return {
+ "success": False,
+ "error": f"الملف غير موجود / File not found: {filepath}"
+ }
+
+ # قراءة محتوى الملف / Read file content
+ with open(file_path, 'r', encoding='utf-8') as f:
+ content = f.read()
+
+ # تحليل الملف باستخدام النموذج / Analyze using model
+ analysis_prompt = f"""قم بتحليل هذا الملف بشكل شامل:
+Analyze this file comprehensively:
+
+اسم الملف / Filename: {file_path.name}
+النوع / Type: {file_path.suffix}
+
+المحتوى / Content:
+{content[:2000]}
+
+قدم:
+Provide:
+1. ملخص المحتوى / Content summary
+2. اللغة المستخدمة / Language used
+3. الغرض من الملف / File purpose
+4. ملاحظات وتوصيات / Notes and recommendations
+"""
+
+ analysis = self._call_ollama(analysis_prompt)
+
+ result = {
+ "success": True,
+ "filename": file_path.name,
+ "filepath": str(file_path),
+ "size": file_path.stat().st_size,
+ "extension": file_path.suffix,
+ "analysis": analysis,
+ "timestamp": datetime.now().isoformat()
+ }
+
+ # حفظ في السجل / Save to history
+ self.conversation_history.append({
+ "timestamp": datetime.now().isoformat(),
+ "type": "analyze_file",
+ "filepath": filepath,
+ "result": result
+ })
+
+ return result
+
+ except Exception as e:
+ logger.error(f"Error analyzing file: {e}")
+ return {
+ "success": False,
+ "error": str(e)
+ }
+
+ def generate_code(self, description: str, lang: str = "python") -> str:
+ """
+ توليد كود برمجي / Generate code
+
+ Args:
+ description: وصف الكود المطلوب / Code description
+ lang: لغة البرمجة / Programming language
+
+ Returns:
+ str: الكود المولد / Generated code
+ """
+ logger.info(f"Generating {lang} code for: {description}")
+
+ code_prompt = f"""اكتب كود {lang} كامل وقابل للتشغيل بناءً على الوصف التالي:
+Write complete, executable {lang} code based on the following description:
+
+الوصف / Description: {description}
+
+متطلبات / Requirements:
+1. كود نظيف ومنظم / Clean and organized code
+2. تعليقات واضحة / Clear comments
+3. معالجة الأخطاء / Error handling
+4. أفضل الممارسات / Best practices
+
+قدم الكود فقط بدون شرح إضافي.
+Provide only the code without additional explanation."""
+
+ code = self._call_ollama(code_prompt)
+
+ # حفظ في السجل / Save to history
+ self.conversation_history.append({
+ "timestamp": datetime.now().isoformat(),
+ "type": "generate_code",
+ "language": lang,
+ "description": description,
+ "code": code
+ })
+
+ return code
+
+ def chat(self, message: str, context: Optional[str] = None) -> str:
+ """
+ محادثة ذكية / Intelligent chat
+
+ Args:
+ message: الرسالة / Message
+ context: سياق إضافي / Additional context
+
+ Returns:
+ str: الرد / Response
+ """
+ logger.info(f"Chat message: {message[:100]}...")
+
+ # بناء السياق من المحادثات السابقة / Build context from history
+ history_context = ""
+ if self.conversation_history:
+ recent = self.conversation_history[-3:] # آخر 3 تفاعلات / Last 3 interactions
+ history_context = "\n".join([
+ f"{h.get('type', 'chat')}: {str(h)[:200]}"
+ for h in recent
+ ])
+
+ full_message = message
+ if context:
+ full_message = f"{context}\n\n{message}"
+
+ if history_context:
+ full_message = f"السياق السابق / Previous context:\n{history_context}\n\n{full_message}"
+
+ response = self._call_ollama(full_message)
+
+ # حفظ في السجل / Save to history
+ self.conversation_history.append({
+ "timestamp": datetime.now().isoformat(),
+ "type": "chat",
+ "message": message,
+ "response": response
+ })
+
+ return response
+
+ def get_models(self) -> List[str]:
+ """
+ الحصول على قائمة النماذج المتاحة / Get available models
+
+ Returns:
+ list: قائمة النماذج / List of models
+ """
+ try:
+ url = f"{self.ollama_host}/api/tags"
+ response = requests.get(url, timeout=10)
+ response.raise_for_status()
+
+ data = response.json()
+ models = [model['name'] for model in data.get('models', [])]
+ logger.info(f"Available models: {models}")
+ return models
+
+ except Exception as e:
+ logger.error(f"Error getting models: {e}")
+ return []
+
+ def health_check(self) -> Dict[str, Any]:
+ """
+ فحص صحة النظام / System health check
+
+ Returns:
+ dict: حالة النظام / System status
+ """
+ try:
+ # فحص اتصال Ollama / Check Ollama connection
+ url = f"{self.ollama_host}/api/tags"
+ response = requests.get(url, timeout=5)
+ ollama_status = response.status_code == 200
+
+ # فحص النموذج / Check model
+ models = self.get_models()
+ model_available = self.model in models
+
+ return {
+ "status": "healthy" if (ollama_status and model_available) else "unhealthy",
+ "ollama_connected": ollama_status,
+ "model_available": model_available,
+ "current_model": self.model,
+ "available_models": models,
+ "conversation_history_size": len(self.conversation_history),
+ "timestamp": datetime.now().isoformat()
+ }
+
+ except Exception as e:
+ logger.error(f"Health check error: {e}")
+ return {
+ "status": "unhealthy",
+ "error": str(e),
+ "timestamp": datetime.now().isoformat()
+ }
+
+ def save_history(self, filepath: str = "conversation_history.json"):
+ """
+ حفظ سجل المحادثات / Save conversation history
+
+ Args:
+ filepath: مسار الملف / File path
+ """
+ try:
+ with open(filepath, 'w', encoding='utf-8') as f:
+ json.dump(self.conversation_history, f, ensure_ascii=False, indent=2)
+ logger.info(f"History saved to {filepath}")
+ except Exception as e:
+ logger.error(f"Error saving history: {e}")
+
+ def load_history(self, filepath: str = "conversation_history.json"):
+ """
+ تحميل سجل المحادثات / Load conversation history
+
+ Args:
+ filepath: مسار الملف / File path
+ """
+ try:
+ with open(filepath, 'r', encoding='utf-8') as f:
+ self.conversation_history = json.load(f)
+ logger.info(f"History loaded from {filepath}")
+ except Exception as e:
+ logger.error(f"Error loading history: {e}")
+
+
+def main():
+ """برنامج سطر الأوامر / Command line interface"""
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ description='Supreme Agent - الوكيل الأعلى المتكامل',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog="""
+أمثلة / Examples:
+ supreme-agent chat "مرحباً، كيف حالك؟"
+ supreme-agent execute "اكتب سكريبت لنسخ الملفات"
+ supreme-agent analyze-file script.py
+ supreme-agent generate-code "برنامج حاسبة بسيط" --lang python
+ supreme-agent health
+ """
+ )
+
+ parser.add_argument('command',
+ choices=['chat', 'execute', 'analyze-file', 'generate-code', 'health', 'models'],
+ help='الأمر المطلوب / Command to execute')
+ parser.add_argument('input', nargs='?',
+ help='المدخل / Input (message, file path, or description)')
+ parser.add_argument('--lang', default='python',
+ help='لغة البرمجة / Programming language (for generate-code)')
+ parser.add_argument('--model', default='supreme-executor',
+ help='النموذج المستخدم / Model to use')
+ parser.add_argument('--host', default='http://localhost:11434',
+ help='عنوان Ollama / Ollama host')
+
+ args = parser.parse_args()
+
+ # إنشاء الوكيل / Create agent
+ agent = SupremeAgent(model=args.model, ollama_host=args.host)
+
+ # تنفيذ الأمر / Execute command
+ if args.command == 'chat':
+ if not args.input:
+ print("خطأ: الرجاء إدخال الرسالة / Error: Please provide a message")
+ sys.exit(1)
+ result = agent.chat(args.input)
+ print(result)
+
+ elif args.command == 'execute':
+ if not args.input:
+ print("خطأ: الرجاء إدخال الأمر / Error: Please provide a command")
+ sys.exit(1)
+ result = agent.execute(args.input)
+ print(result)
+
+ elif args.command == 'analyze-file':
+ if not args.input:
+ print("خطأ: الرجاء إدخال مسار الملف / Error: Please provide a file path")
+ sys.exit(1)
+ result = agent.analyze_file(args.input)
+ print(json.dumps(result, ensure_ascii=False, indent=2))
+
+ elif args.command == 'generate-code':
+ if not args.input:
+ print("خطأ: الرجاء إدخال وصف الكود / Error: Please provide a code description")
+ sys.exit(1)
+ result = agent.generate_code(args.input, lang=args.lang)
+ print(result)
+
+ elif args.command == 'health':
+ result = agent.health_check()
+ print(json.dumps(result, ensure_ascii=False, indent=2))
+
+ elif args.command == 'models':
+ models = agent.get_models()
+ print("النماذج المتاحة / Available models:")
+ for model in models:
+ print(f" - {model}")
+
+
+if __name__ == "__main__":
+ main()
From 72bdcdd8a1f29692d8fca53e8ce30b022214770c Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:21:23 +0000
Subject: [PATCH 3/7] Add web interface and integration scripts for Supreme
Agent
Co-authored-by: wasalstor-web <230709381+wasalstor-web@users.noreply.github.com>
---
scripts/integrate-openwebui.sh | 180 +++++++++
scripts/quick-start.sh | 163 ++++++++
web/app.js | 575 ++++++++++++++++++++++++++++
web/index.html | 254 +++++++++++++
web/style.css | 664 +++++++++++++++++++++++++++++++++
5 files changed, 1836 insertions(+)
create mode 100755 scripts/integrate-openwebui.sh
create mode 100755 scripts/quick-start.sh
create mode 100644 web/app.js
create mode 100644 web/index.html
create mode 100644 web/style.css
diff --git a/scripts/integrate-openwebui.sh b/scripts/integrate-openwebui.sh
new file mode 100755
index 0000000..1d0e2b3
--- /dev/null
+++ b/scripts/integrate-openwebui.sh
@@ -0,0 +1,180 @@
+#!/bin/bash
+
+################################################################################
+# OpenWebUI Integration Script
+# سكريبت التكامل مع OpenWebUI
+#
+# هذا السكريبت يقوم بربط Supreme Agent مع OpenWebUI
+# This script integrates Supreme Agent with OpenWebUI
+#
+# المؤلف / Author: wasalstor-web
+# التاريخ / Date: 2025-10-20
+################################################################################
+
+# Abort on the first failing command.
+set -e
+
+# ANSI colour fragments. NOTE: these hold literal "\033[..." text; they only
+# render as colours when printed through `echo -e` (or printf '%b').
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+YELLOW='\033[1;33m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+# Bilingual status-output helpers. All of them rely on `echo -e` so the
+# colour variables above are rendered as real ANSI escape sequences.
+print_header() {
+    local rule="${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+    echo -e "\n${rule}"
+    echo -e "${BLUE}  $1${NC}"
+    echo -e "${rule}\n"
+}
+
+print_success() { echo -e "${GREEN}✓ $1${NC}"; }
+
+print_info() { echo -e "${YELLOW}ℹ $1${NC}"; }
+
+print_error() { echo -e "${RED}✗ $1${NC}"; }
+
+print_header "Supreme Agent & OpenWebUI Integration / التكامل مع OpenWebUI"
+
+# Pre-flight: the Ollama CLI must already be installed.
+print_info "التحقق من Ollama / Checking Ollama..."
+if ! command -v ollama &> /dev/null; then
+    print_error "Ollama غير مثبت / Ollama not installed"
+    print_info "قم بتشغيل ./scripts/install-supreme-agent.sh أولاً"
+    print_info "Run ./scripts/install-supreme-agent.sh first"
+    exit 1
+fi
+print_success "Ollama مثبت / Ollama installed"
+
+# Pre-flight: an OpenWebUI container must be running.
+# NOTE(review): `grep -q openwebui` matches any `docker ps` line containing
+# that substring; a container named "open-webui" would NOT match — confirm
+# the container name used by setup-openwebui.sh.
+print_info "التحقق من OpenWebUI / Checking OpenWebUI..."
+if command -v docker &> /dev/null && docker ps | grep -q openwebui; then
+    print_success "OpenWebUI يعمل / OpenWebUI is running"
+else
+    print_error "OpenWebUI غير مثبت أو لا يعمل / OpenWebUI not installed or not running"
+    print_info "قم بتشغيل ./setup-openwebui.sh أولاً"
+    print_info "Run ./setup-openwebui.sh first"
+    exit 1
+fi
+
+# Show the locally available models (informational only; output is not parsed).
+print_info "مزامنة النماذج / Syncing models..."
+ollama list
+
+# Build the custom model from models/Modelfile if it is not present yet.
+if ollama list | grep -q "supreme-executor"; then
+    print_success "النموذج المخصص متاح / Custom model available"
+else
+    print_info "إنشاء النموذج المخصص / Creating custom model..."
+    # Move to the repository root so the relative Modelfile path resolves.
+    cd "$(dirname "$0")/.."
+    if [ -f "models/Modelfile" ]; then
+        ollama create supreme-executor -f models/Modelfile
+        print_success "تم إنشاء النموذج المخصص / Custom model created"
+    else
+        print_error "ملف Modelfile غير موجود / Modelfile not found"
+        exit 1
+    fi
+fi
+
+# Write a machine-readable description of the integration to /tmp.
+# The quoted 'EOF' delimiter suppresses all shell expansion inside the JSON.
+print_info "إنشاء ملف التكوين / Creating configuration file..."
+
+cat > /tmp/supreme-openwebui-config.json << 'EOF'
+{
+  "integration": {
+    "type": "full",
+    "ollama_host": "http://localhost:11434",
+    "openwebui_port": 3000,
+    "supreme_api_port": 5000,
+    "models": {
+      "supreme-executor": "النموذج الأعلى المتكامل / Supreme Integrated Model",
+      "llama3": "نموذج أساسي / Base Model",
+      "aya": "نموذج عربي / Arabic Model",
+      "mistral": "نموذج سريع / Fast Model",
+      "deepseek-coder": "نموذج برمجة / Coding Model",
+      "qwen2": "نموذج متقدم / Advanced Model"
+    },
+    "features": {
+      "chat": true,
+      "execute": true,
+      "analyze": true,
+      "generate_code": true
+    }
+  }
+}
+EOF
+
+print_success "تم إنشاء ملف التكوين / Configuration file created"
+
+# اختبار التكامل / Test integration
+print_info "اختبار التكامل / Testing integration..."
+
+# Probe the Ollama REST API. FIX: cap each probe at 5 seconds — a plain
+# `curl -s` has no timeout and could hang this script forever when the
+# daemon is wedged.
+if curl -s --max-time 5 http://localhost:11434/api/tags > /dev/null; then
+    print_success "Ollama API يعمل / Ollama API working"
+else
+    print_error "Ollama API لا يستجيب / Ollama API not responding"
+fi
+
+# Probe the OpenWebUI front-end the same way.
+if curl -s --max-time 5 http://localhost:3000 > /dev/null; then
+    print_success "OpenWebUI يعمل / OpenWebUI working"
+else
+    print_error "OpenWebUI لا يستجيب / OpenWebUI not responding"
+fi
+
+# عرض معلومات الوصول / Show access information
+print_header "معلومات الوصول / Access Information"
+
+cat << EOF
+${GREEN}التكامل مكتمل! / Integration Complete!${NC}
+
+${YELLOW}روابط الوصول / Access URLs:${NC}
+
+ ${BLUE}OpenWebUI:${NC}
+ http://localhost:3000
+
+ ${BLUE}Supreme Agent API:${NC}
+ http://localhost:5000
+ http://localhost:5000/api/docs
+
+ ${BLUE}Supreme Agent Web UI:${NC}
+ file://$(pwd)/web/index.html
+ أو / or: python3 -m http.server 8080 (في مجلد web)
+
+${YELLOW}النماذج المتاحة / Available Models:${NC}
+ • supreme-executor - الوكيل الأعلى / Supreme Agent
+ • llama3 - نموذج أساسي / Base model
+ • aya - نموذج عربي / Arabic model
+ • mistral - نموذج سريع / Fast model
+ • deepseek-coder - نموذج برمجة / Coding model
+ • qwen2 - نموذج متقدم / Advanced model
+
+${YELLOW}الاستخدام / Usage:${NC}
+
+ 1. ${BLUE}في OpenWebUI:${NC}
+ - افتح http://localhost:3000
+ - اختر النموذج supreme-executor
+ - ابدأ المحادثة
+
+ 2. ${BLUE}في واجهة Supreme Agent:${NC}
+ - افتح web/index.html
+ - استخدم جميع الميزات المتقدمة
+
+ 3. ${BLUE}عبر API:${NC}
+ curl -X POST http://localhost:5000/api/chat \\
+ -H "Content-Type: application/json" \\
+ -d '{"message": "مرحباً"}'
+
+${YELLOW}إعدادات إضافية / Additional Settings:${NC}
+ - ملف التكوين: /tmp/supreme-openwebui-config.json
+ - السجلات: ./supreme_agent.log
+ - السجل: يتم حفظه في المتصفح localStorage
+
+${GREEN}استمتع باستخدام Supreme Agent! / Enjoy using Supreme Agent!${NC}
+EOF
diff --git a/scripts/quick-start.sh b/scripts/quick-start.sh
new file mode 100755
index 0000000..e289c91
--- /dev/null
+++ b/scripts/quick-start.sh
@@ -0,0 +1,163 @@
+#!/bin/bash
+
+################################################################################
+# Quick Start Script - سكريبت البدء السريع
+# Supreme Agent - One Command Installation
+#
+# المؤلف / Author: wasalstor-web
+# التاريخ / Date: 2025-10-20
+################################################################################
+
+# Abort on the first failing command.
+set -e
+
+# ANSI colour fragments (literal "\033[..." text; rendered only by `echo -e`).
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+# Banner. The quoted "EOF" delimiter keeps the ASCII art completely verbatim.
+echo -e "${BLUE}"
+cat << "EOF"
+╔═══════════════════════════════════════════════════════════╗
+║ ║
+║ ███████╗██╗ ██╗██████╗ ██████╗ ███████╗███╗ ███╗ ║
+║ ██╔════╝██║ ██║██╔══██╗██╔══██╗██╔════╝████╗ ████║ ║
+║ ███████╗██║ ██║██████╔╝██████╔╝█████╗ ██╔████╔██║ ║
+║ ╚════██║██║ ██║██╔═══╝ ██╔══██╗██╔══╝ ██║╚██╔╝██║ ║
+║ ███████║╚██████╔╝██║ ██║ ██║███████╗██║ ╚═╝ ██║ ║
+║ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ║
+║ ║
+║ AGENT - الوكيل الأعلى المتكامل ║
+║ Quick Start / بدء سريع ║
+║ ║
+╚═══════════════════════════════════════════════════════════╝
+EOF
+echo -e "${NC}\n"
+
+echo -e "${YELLOW}مرحباً! سيتم تثبيت Supreme Agent بالكامل.${NC}"
+echo -e "${YELLOW}Welcome! Supreme Agent will be fully installed.${NC}\n"
+
+# Resolve the repository root from this script's own location and work there,
+# so the script behaves the same no matter where it is invoked from.
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+cd "$PROJECT_DIR"
+
+echo -e "${GREEN}مجلد المشروع / Project directory: $PROJECT_DIR${NC}\n"
+
+# Step 1: run the full installer if it is present (hard failure here aborts
+# the script because of `set -e`).
+echo -e "${BLUE}[1/4] تثبيت Supreme Agent / Installing Supreme Agent...${NC}"
+if [ -f "scripts/install-supreme-agent.sh" ]; then
+    bash scripts/install-supreme-agent.sh
+    echo -e "${GREEN}✓ تم التثبيت / Installed${NC}\n"
+else
+    echo -e "${YELLOW}⚠ سكريبت التثبيت غير موجود / Installation script not found${NC}\n"
+fi
+
+# Step 2: start the API server detached. PID and log live under /tmp so the
+# "stop servers" commands printed at the end can find them.
+echo -e "${BLUE}[2/4] تشغيل API Server / Starting API Server...${NC}"
+if [ -f "api/server.py" ]; then
+    nohup python3 api/server.py > /tmp/supreme-api.log 2>&1 &
+    API_PID=$!
+    echo $API_PID > /tmp/supreme-api.pid
+    # Give the server a moment to either bind or crash before checking.
+    sleep 3
+
+    if ps -p $API_PID > /dev/null; then
+        echo -e "${GREEN}✓ API Server يعمل / API Server running (PID: $API_PID)${NC}"
+        echo -e "${GREEN}  Log: /tmp/supreme-api.log${NC}\n"
+    else
+        echo -e "${YELLOW}⚠ فشل تشغيل API Server / Failed to start API Server${NC}\n"
+    fi
+else
+    echo -e "${YELLOW}⚠ API Server غير موجود / API Server not found${NC}\n"
+fi
+
+# Step 3: serve ./web on port 8080 with Python's built-in HTTP server.
+echo -e "${BLUE}[3/4] تشغيل واجهة الويب / Starting Web Interface...${NC}"
+if [ -d "web" ]; then
+    cd web
+    nohup python3 -m http.server 8080 > /tmp/supreme-web.log 2>&1 &
+    WEB_PID=$!
+    echo $WEB_PID > /tmp/supreme-web.pid
+    cd ..
+    sleep 2
+
+    if ps -p $WEB_PID > /dev/null; then
+        echo -e "${GREEN}✓ واجهة الويب تعمل / Web Interface running (PID: $WEB_PID)${NC}"
+        echo -e "${GREEN}  Log: /tmp/supreme-web.log${NC}\n"
+    else
+        echo -e "${YELLOW}⚠ فشل تشغيل واجهة الويب / Failed to start Web Interface${NC}\n"
+    fi
+else
+    echo -e "${YELLOW}⚠ مجلد web غير موجود / web directory not found${NC}\n"
+fi
+
+# Step 4: optional OpenWebUI integration behind an interactive prompt.
+# `|| true` keeps a failing integration from killing the script under set -e.
+echo -e "${BLUE}[4/4] التكامل مع OpenWebUI (اختياري) / OpenWebUI Integration (optional)${NC}"
+if [ -f "scripts/integrate-openwebui.sh" ]; then
+    read -p "هل تريد التكامل مع OpenWebUI؟ / Integrate with OpenWebUI? (y/n): " -n 1 -r
+    echo
+    if [[ $REPLY =~ ^[Yy]$ ]]; then
+        bash scripts/integrate-openwebui.sh || true
+    else
+        echo -e "${YELLOW}تم تخطي التكامل / Integration skipped${NC}"
+    fi
+else
+    echo -e "${YELLOW}⚠ سكريبت التكامل غير موجود / Integration script not found${NC}"
+fi
+
+echo -e "\n${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+echo -e "${GREEN} التثبيت مكتمل! / Installation Complete! ${NC}"
+echo -e "${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"
+
+cat << EOF
+${YELLOW}روابط الوصول / Access URLs:${NC}
+
+ ${BLUE}1. واجهة الويب / Web Interface:${NC}
+ http://localhost:8080
+
+ ${BLUE}2. API Server:${NC}
+ http://localhost:5000
+ http://localhost:5000/api/docs
+
+ ${BLUE}3. OpenWebUI (إذا تم التثبيت):${NC}
+ http://localhost:3000
+
+${YELLOW}الأوامر المتاحة / Available Commands:${NC}
+
+ ${BLUE}supreme-agent chat${NC} "رسالتك"
+ ${BLUE}supreme-agent execute${NC} "أمرك"
+ ${BLUE}supreme-agent analyze-file${NC} path/to/file
+ ${BLUE}supreme-agent generate-code${NC} "وصف" --lang python
+ ${BLUE}supreme-agent health${NC}
+ ${BLUE}supreme-agent models${NC}
+
+${YELLOW}إيقاف الخوادم / Stop Servers:${NC}
+
+ ${BLUE}# إيقاف API Server${NC}
+ kill \$(cat /tmp/supreme-api.pid 2>/dev/null) 2>/dev/null || true
+
+ ${BLUE}# إيقاف واجهة الويب${NC}
+ kill \$(cat /tmp/supreme-web.pid 2>/dev/null) 2>/dev/null || true
+
+${YELLOW}السجلات / Logs:${NC}
+
+ ${BLUE}# API Server logs${NC}
+ tail -f /tmp/supreme-api.log
+
+ ${BLUE}# Web Interface logs${NC}
+ tail -f /tmp/supreme-web.log
+
+ ${BLUE}# Agent logs${NC}
+ tail -f supreme_agent.log
+
+${YELLOW}مزيد من المعلومات / More Information:${NC}
+
+ • README.md - دليل شامل / Complete guide
+ • docs/API.md - توثيق API / API documentation
+ • docs/MODELS.md - شرح النماذج / Models explanation
+
+${GREEN}استمتع باستخدام Supreme Agent! 🚀${NC}
+${GREEN}Enjoy using Supreme Agent! 🚀${NC}
+EOF
diff --git a/web/app.js b/web/app.js
new file mode 100644
index 0000000..ca6452f
--- /dev/null
+++ b/web/app.js
@@ -0,0 +1,575 @@
+/**
+ * Supreme Agent - Web Interface JavaScript
+ * الوكيل الأعلى - واجهة الويب
+ *
+ * Author: wasalstor-web
+ * Date: 2025-10-20
+ */
+
+// Configuration / الإعدادات
+let config = {
+ apiUrl: localStorage.getItem('apiUrl') || 'http://localhost:5000',
+ defaultModel: localStorage.getItem('defaultModel') || 'supreme-executor',
+ temperature: parseFloat(localStorage.getItem('temperature')) || 0.7,
+ language: localStorage.getItem('language') || 'ar',
+ theme: localStorage.getItem('theme') || 'light'
+};
+
+// State / الحالة
+let state = {
+ currentTab: 'chat',
+ conversationHistory: JSON.parse(localStorage.getItem('conversationHistory')) || [],
+ isConnected: false
+};
+
+// Initialize / التهيئة
+// Boot sequence: apply the persisted theme and language, wire up the tab
+// bar and all event handlers, then probe the API and restore saved history.
+document.addEventListener('DOMContentLoaded', () => {
+    initializeTheme();
+    initializeLanguage();
+    initializeTabs();
+    initializeEventListeners();
+    checkHealth();
+    loadHistory();
+});
+
+// Theme Management / إدارة الوضع
+function initializeTheme() {
+ document.documentElement.setAttribute('data-theme', config.theme);
+ updateThemeIcon();
+}
+
+function toggleTheme() {
+ config.theme = config.theme === 'light' ? 'dark' : 'light';
+ localStorage.setItem('theme', config.theme);
+ initializeTheme();
+}
+
+function updateThemeIcon() {
+ const icon = document.querySelector('#themeToggle .icon');
+ icon.textContent = config.theme === 'light' ? '🌙' : '☀️';
+}
+
+// Language Management / إدارة اللغة
+function initializeLanguage() {
+ document.documentElement.setAttribute('dir', config.language === 'ar' ? 'rtl' : 'ltr');
+ document.documentElement.setAttribute('lang', config.language);
+ updateLanguageTexts();
+}
+
+function toggleLanguage() {
+ config.language = config.language === 'ar' ? 'en' : 'ar';
+ localStorage.setItem('language', config.language);
+ initializeLanguage();
+}
+
+function updateLanguageTexts() {
+ const elements = document.querySelectorAll('[data-ar][data-en]');
+ elements.forEach(el => {
+ const text = config.language === 'ar' ? el.getAttribute('data-ar') : el.getAttribute('data-en');
+ if (el.placeholder !== undefined) {
+ el.placeholder = text;
+ } else {
+ el.textContent = text;
+ }
+ });
+}
+
+// Tab Management / إدارة التبويبات
+function initializeTabs() {
+ const navItems = document.querySelectorAll('.nav-item');
+ navItems.forEach(item => {
+ item.addEventListener('click', () => {
+ const tab = item.getAttribute('data-tab');
+ switchTab(tab);
+ });
+ });
+}
+
+function switchTab(tabName) {
+ // Update navigation
+ document.querySelectorAll('.nav-item').forEach(item => {
+ item.classList.remove('active');
+ if (item.getAttribute('data-tab') === tabName) {
+ item.classList.add('active');
+ }
+ });
+
+ // Update content
+ document.querySelectorAll('.tab-content').forEach(content => {
+ content.classList.remove('active');
+ if (content.id === `tab-${tabName}`) {
+ content.classList.add('active');
+ }
+ });
+
+ state.currentTab = tabName;
+
+ // Load data for specific tabs
+ if (tabName === 'models') {
+ loadModels();
+ } else if (tabName === 'history') {
+ displayHistory();
+ }
+}
+
+// Event Listeners / مستمعي الأحداث
+function initializeEventListeners() {
+ // Theme toggle
+ document.getElementById('themeToggle').addEventListener('click', toggleTheme);
+
+ // Language toggle
+ document.getElementById('langToggle').addEventListener('click', toggleLanguage);
+
+ // Settings
+ document.getElementById('settingsBtn').addEventListener('click', openSettings);
+ document.querySelector('.modal-close').addEventListener('click', closeSettings);
+ document.getElementById('saveSettingsBtn').addEventListener('click', saveSettings);
+
+ // Chat
+ document.getElementById('chatSendBtn').addEventListener('click', sendChatMessage);
+ document.getElementById('chatInput').addEventListener('keypress', (e) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault();
+ sendChatMessage();
+ }
+ });
+
+ // Execute
+ document.getElementById('executeBtn').addEventListener('click', executeCommand);
+
+ // Analyze
+ document.getElementById('analyzeBtn').addEventListener('click', analyzeFile);
+ document.getElementById('analyzeFileUpload').addEventListener('change', handleFileUpload);
+
+ // Generate
+ document.getElementById('generateBtn').addEventListener('click', generateCode);
+
+ // Models
+ document.getElementById('refreshModelsBtn').addEventListener('click', loadModels);
+
+ // History
+ document.getElementById('exportHistoryBtn').addEventListener('click', exportHistory);
+ document.getElementById('clearHistoryBtn').addEventListener('click', clearHistory);
+
+ // Temperature slider
+ document.getElementById('temperature').addEventListener('input', (e) => {
+ document.getElementById('temperatureValue').textContent = e.target.value;
+ });
+}
+
+// API Functions / دوال API
+async function apiCall(endpoint, method = 'GET', data = null) {
+ try {
+ const options = {
+ method,
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ };
+
+ if (data) {
+ options.body = JSON.stringify(data);
+ }
+
+ const response = await fetch(`${config.apiUrl}${endpoint}`, options);
+ const result = await response.json();
+
+ return result;
+ } catch (error) {
+ console.error('API call error:', error);
+ showNotification('خطأ في الاتصال / Connection error', 'error');
+ return { success: false, error: error.message };
+ }
+}
+
+// Health Check / فحص الصحة
+async function checkHealth() {
+ const result = await apiCall('/api/health');
+
+ if (result.status === 'healthy') {
+ state.isConnected = true;
+ updateStatus('متصل / Connected', true);
+ } else {
+ state.isConnected = false;
+ updateStatus('غير متصل / Disconnected', false);
+ }
+}
+
+function updateStatus(text, connected) {
+ const statusText = document.getElementById('statusText');
+ const statusDot = document.getElementById('statusDot');
+
+ statusText.textContent = text;
+
+ if (connected) {
+ statusDot.classList.add('connected');
+ } else {
+ statusDot.classList.remove('connected');
+ }
+}
+
+// Chat Functions / دوال المحادثة
+function sendQuickMessage(message) {
+ document.getElementById('chatInput').value = message;
+ sendChatMessage();
+}
+
+async function sendChatMessage() {
+ const input = document.getElementById('chatInput');
+ const message = input.value.trim();
+
+ if (!message) return;
+
+ // Add user message
+ addMessage(message, 'user');
+
+ // Clear input
+ input.value = '';
+
+ // Show typing indicator
+ const typingId = addMessage('...', 'agent', true);
+
+ // Call API
+ const result = await apiCall('/api/chat', 'POST', { message });
+
+ // Remove typing indicator
+ removeMessage(typingId);
+
+ // Add agent response
+ if (result.success) {
+ addMessage(result.response, 'agent');
+
+ // Save to history
+ saveToHistory('chat', message, result.response);
+ } else {
+ addMessage(`خطأ: ${result.error}`, 'agent');
+ }
+}
+
+// Append a chat bubble for `sender` ('user' | 'agent') and return its id so
+// a transient "typing" bubble can be removed later via removeMessage().
+// NOTE(review): `isTyping` is never read in the visible body — confirm
+// whether it was meant to add a CSS class. The markup inside the template
+// literal below looks truncated in this patch (wrapper tags appear to be
+// missing) — verify against the original web/app.js before editing it.
+function addMessage(text, sender, isTyping = false) {
+    const messagesDiv = document.getElementById('chatMessages');
+    const messageId = `msg-${Date.now()}`;
+
+    // Drop the one-time welcome banner on the first real message.
+    const welcomeMsg = messagesDiv.querySelector('.welcome-message');
+    if (welcomeMsg) {
+        welcomeMsg.remove();
+    }
+
+    const messageDiv = document.createElement('div');
+    messageDiv.className = `message ${sender}`;
+    messageDiv.id = messageId;
+
+    const avatar = sender === 'user' ? '👤' : '🤖';
+
+    messageDiv.innerHTML = `
+
 ${avatar}
+ ${escapeHtml(text)}
+ `;
+
+    messagesDiv.appendChild(messageDiv);
+    // Keep the newest message scrolled into view.
+    messagesDiv.scrollTop = messagesDiv.scrollHeight;
+
+    return messageId;
+}
+
+// Delete a bubble by id; silently ignores ids that no longer exist.
+function removeMessage(messageId) {
+    const message = document.getElementById(messageId);
+    if (message) {
+        message.remove();
+    }
+}
+
+// Execute Functions / دوال التنفيذ
+// NOTE(review): several innerHTML template literals in this section — and a
+// single-quoted string split across two lines in loadModels() — look
+// truncated/mangled in this patch (element tags appear to be stripped).
+// They are preserved verbatim here; verify against the original web/app.js.
+
+// Send a free-form command to the agent and render its textual reply.
+async function executeCommand() {
+    const input = document.getElementById('executeInput');
+    const resultDiv = document.getElementById('executeResult');
+    const command = input.value.trim();
+
+    if (!command) {
+        showNotification('الرجاء إدخال أمر / Please enter a command', 'warning');
+        return;
+    }
+
+    // Clear previous output and reveal the result panel.
+    resultDiv.innerHTML = '';
+    resultDiv.classList.add('show');
+
+    const result = await apiCall('/api/execute', 'POST', { command });
+
+    if (result.success) {
+        resultDiv.innerHTML = `${escapeHtml(result.response)}`;
+        saveToHistory('execute', command, result.response);
+    } else {
+        resultDiv.innerHTML = `خطأ: ${result.error}
 `;
+    }
+}
+
+// Analyze Functions / دوال التحليل
+// Ask the backend to analyze the file at the given path and show metadata
+// (name, size, extension) plus the model's analysis text.
+async function analyzeFile() {
+    const filepath = document.getElementById('analyzeFilepath').value.trim();
+    const resultDiv = document.getElementById('analyzeResult');
+
+    if (!filepath) {
+        showNotification('الرجاء إدخال مسار الملف / Please enter file path', 'warning');
+        return;
+    }
+
+    resultDiv.innerHTML = '';
+    resultDiv.classList.add('show');
+
+    const result = await apiCall('/api/analyze', 'POST', { filepath });
+
+    if (result.success) {
+        const html = `
+ تحليل الملف / File Analysis
+ الاسم / Name: ${result.filename}
+ الحجم / Size: ${formatBytes(result.size)}
+ النوع / Type: ${result.extension}
+
+
 التحليل / Analysis:
+
 ${escapeHtml(result.analysis)}
+
+ `;
+        resultDiv.innerHTML = html;
+        saveToHistory('analyze', filepath, result.analysis);
+    } else {
+        resultDiv.innerHTML = `خطأ: ${result.error}
 `;
+    }
+}
+
+// Mirror the picked file's name into the path field. NOTE(review): only the
+// name is sent — the file contents are never uploaded; the backend must be
+// able to reach the path itself. Confirm this is intended.
+function handleFileUpload(event) {
+    const file = event.target.files[0];
+    if (file) {
+        document.getElementById('analyzeFilepath').value = file.name;
+        showNotification(`تم اختيار: ${file.name}`, 'success');
+    }
+}
+
+// Generate Code Functions / دوال توليد الكود
+// Ask the backend to generate code in the selected language from a prose
+// description, then render it with a copy button.
+async function generateCode() {
+    const description = document.getElementById('generateInput').value.trim();
+    const language = document.getElementById('codeLanguage').value;
+    const resultDiv = document.getElementById('generateResult');
+
+    if (!description) {
+        showNotification('الرجاء إدخال وصف الكود / Please enter code description', 'warning');
+        return;
+    }
+
+    resultDiv.innerHTML = '';
+    resultDiv.classList.add('show');
+
+    const result = await apiCall('/api/generate-code', 'POST', { description, language });
+
+    if (result.success) {
+        const html = `
+
+
 كود ${language} مولد / Generated ${language} Code
+
+
+ ${escapeHtml(result.code)}
+ `;
+        resultDiv.innerHTML = html;
+        saveToHistory('generate', `${language}: ${description}`, result.code);
+    } else {
+        resultDiv.innerHTML = `خطأ: ${result.error}
 `;
+    }
+}
+
+// Copy the generated code block adjacent to the clicked button.
+function copyCode(button) {
+    const code = button.parentElement.nextElementSibling.textContent;
+    navigator.clipboard.writeText(code);
+    showNotification('تم النسخ / Copied', 'success');
+}
+
+// Models Functions / دوال النماذج
+// Fetch the available model list and mark the currently selected one.
+async function loadModels() {
+    const listDiv = document.getElementById('modelsList');
+    listDiv.innerHTML = '';
+
+    const result = await apiCall('/api/models');
+
+    if (result.success) {
+        if (result.models.length === 0) {
+            listDiv.innerHTML = 'لا توجد نماذج متاحة / No models available
 ';
+            return;
+        }
+
+        const html = result.models.map(model => `
+
+
 ${model}
+
 ${model === result.current_model ? '✓ ' : ''}${model === result.current_model ? 'النموذج الحالي / Current' : ''}
+
+ `).join('');
+
+        listDiv.innerHTML = html;
+    } else {
+        listDiv.innerHTML = `خطأ: ${result.error}
 `;
+    }
+}
+
+// History Functions / دوال السجل
+function saveToHistory(type, input, output) {
+ const entry = {
+ id: Date.now(),
+ type,
+ input,
+ output,
+ timestamp: new Date().toISOString()
+ };
+
+ state.conversationHistory.unshift(entry);
+
+ // Keep only last 100 entries
+ if (state.conversationHistory.length > 100) {
+ state.conversationHistory = state.conversationHistory.slice(0, 100);
+ }
+
+ localStorage.setItem('conversationHistory', JSON.stringify(state.conversationHistory));
+}
+
+function loadHistory() {
+ state.conversationHistory = JSON.parse(localStorage.getItem('conversationHistory')) || [];
+}
+
+// Render the saved history list. NOTE(review): the innerHTML assignments
+// below contain template markup that looks truncated in this patch (tags
+// stripped, one quoted string split across lines) — preserved verbatim;
+// verify against the original web/app.js before editing.
+function displayHistory() {
+    const listDiv = document.getElementById('historyList');
+
+    if (state.conversationHistory.length === 0) {
+        listDiv.innerHTML = 'لا يوجد سجل / No history
 ';
+        return;
+    }
+
+    const html = state.conversationHistory.map(entry => {
+        // Locale-aware timestamp matching the active UI language.
+        const date = new Date(entry.timestamp).toLocaleString(config.language === 'ar' ? 'ar-SA' : 'en-US');
+        const typeIcons = { chat: '💬', execute: '⚡', analyze: '📊', generate: '💻' };
+
+        return `
+
+
+ ${typeIcons[entry.type]} ${entry.type}
+ ${date}
+
+
+ ${escapeHtml(entry.input.substring(0, 100))}
+
+
+ `;
+    }).join('');
+
+    listDiv.innerHTML = html;
+}
+
+// Jump to the tab the entry came from and show a quick summary of it.
+function viewHistoryItem(id) {
+    const entry = state.conversationHistory.find(e => e.id === id);
+    if (!entry) return;
+
+    // Switch to appropriate tab and populate
+    switchTab(entry.type === 'chat' ? 'chat' : entry.type);
+
+    // Show entry details (output preview capped at 200 characters).
+    alert(`${entry.type}\n\nInput: ${entry.input}\n\nOutput: ${entry.output.substring(0, 200)}...`);
+}
+
+function exportHistory() {
+ const dataStr = JSON.stringify(state.conversationHistory, null, 2);
+ const dataBlob = new Blob([dataStr], { type: 'application/json' });
+ const url = URL.createObjectURL(dataBlob);
+
+ const link = document.createElement('a');
+ link.href = url;
+ link.download = `supreme-agent-history-${Date.now()}.json`;
+ link.click();
+
+ URL.revokeObjectURL(url);
+ showNotification('تم التصدير / Exported', 'success');
+}
+
+function clearHistory() {
+ if (confirm('هل أنت متأكد من مسح السجل؟ / Are you sure you want to clear history?')) {
+ state.conversationHistory = [];
+ localStorage.removeItem('conversationHistory');
+ displayHistory();
+ showNotification('تم مسح السجل / History cleared', 'success');
+ }
+}
+
+// Settings Functions / دوال الإعدادات
+function openSettings() {
+ document.getElementById('settingsModal').classList.add('show');
+ document.getElementById('apiUrl').value = config.apiUrl;
+ document.getElementById('defaultModel').value = config.defaultModel;
+ document.getElementById('temperature').value = config.temperature;
+ document.getElementById('temperatureValue').textContent = config.temperature;
+}
+
+function closeSettings() {
+ document.getElementById('settingsModal').classList.remove('show');
+}
+
+function saveSettings() {
+ config.apiUrl = document.getElementById('apiUrl').value;
+ config.defaultModel = document.getElementById('defaultModel').value;
+ config.temperature = parseFloat(document.getElementById('temperature').value);
+
+ localStorage.setItem('apiUrl', config.apiUrl);
+ localStorage.setItem('defaultModel', config.defaultModel);
+ localStorage.setItem('temperature', config.temperature);
+
+ closeSettings();
+ showNotification('تم حفظ الإعدادات / Settings saved', 'success');
+ checkHealth();
+}
+
+// Utility Functions / دوال مساعدة
+function escapeHtml(text) {
+ const div = document.createElement('div');
+ div.textContent = text;
+ return div.innerHTML;
+}
+
+function formatBytes(bytes) {
+ if (bytes === 0) return '0 Bytes';
+ const k = 1024;
+ const sizes = ['Bytes', 'KB', 'MB', 'GB'];
+ const i = Math.floor(Math.log(bytes) / Math.log(k));
+ return Math.round(bytes / Math.pow(k, i) * 100) / 100 + ' ' + sizes[i];
+}
+
+function showNotification(message, type = 'info') {
+ // Simple notification - could be enhanced with a library
+ const colors = {
+ success: 'var(--success)',
+ error: 'var(--error)',
+ warning: 'var(--warning)',
+ info: 'var(--accent-primary)'
+ };
+
+ const notification = document.createElement('div');
+ notification.style.cssText = `
+ position: fixed;
+ top: 20px;
+ right: 20px;
+ padding: 1rem 1.5rem;
+ background: ${colors[type]};
+ color: white;
+ border-radius: 0.5rem;
+ box-shadow: 0 4px 12px rgba(0,0,0,0.2);
+ z-index: 10000;
+ animation: slideIn 0.3s ease;
+ `;
+ notification.textContent = message;
+
+ document.body.appendChild(notification);
+
+ setTimeout(() => {
+ notification.style.animation = 'fadeOut 0.3s ease';
+ setTimeout(() => notification.remove(), 300);
+ }, 3000);
+}
+
+// Auto-check health every 30 seconds
+setInterval(checkHealth, 30000);
diff --git a/web/index.html b/web/index.html
new file mode 100644
index 0000000..0ad38e8
--- /dev/null
+++ b/web/index.html
@@ -0,0 +1,254 @@
+
+
+
+
+
+ Supreme Agent - الوكيل الأعلى المتكامل
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
مرحباً بك في الوكيل الأعلى
+
+ مساعدك الذكي للمحادثة والتحليل وتوليد الأكواد والمزيد.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
تنفيذ أمر
+
+ أخبر الوكيل بما تريد فعله وسيقوم بتنفيذه.
+
+
+
+
+
+
+
+
+
+
+
تحليل ملف
+
+ ارفع ملف أو حدد مساره لتحليله.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
توليد كود
+
+ صف الكود الذي تحتاجه وسيقوم الوكيل بتوليده.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
النماذج المتاحة
+
+ إدارة وعرض نماذج الذكاء الاصطناعي المتاحة.
+
+
+
+
+
+
+
+
+
+
سجل المحادثات
+
+ عرض وإدارة سجل تفاعلاتك.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web/style.css b/web/style.css
new file mode 100644
index 0000000..00a175d
--- /dev/null
+++ b/web/style.css
@@ -0,0 +1,664 @@
+/* Supreme Agent - Modern Web Interface Styles
+ * الوكيل الأعلى - تصميم واجهة ويب حديثة
+ * Author: wasalstor-web
+ * Date: 2025-10-20
+ */
+
+/* CSS Variables / المتغيرات */
+:root {
+ /* Colors - Light Mode */
+ --bg-primary: #f8f9fa;
+ --bg-secondary: #ffffff;
+ --bg-tertiary: #e9ecef;
+ --text-primary: #212529;
+ --text-secondary: #6c757d;
+ --accent-primary: #4f46e5;
+ --accent-secondary: #7c3aed;
+ --accent-gradient: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+ --success: #10b981;
+ --warning: #f59e0b;
+ --error: #ef4444;
+ --border-color: #dee2e6;
+ --shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
+ --shadow-lg: 0 10px 30px rgba(0, 0, 0, 0.15);
+
+ /* Spacing */
+ --spacing-xs: 0.5rem;
+ --spacing-sm: 1rem;
+ --spacing-md: 1.5rem;
+ --spacing-lg: 2rem;
+ --spacing-xl: 3rem;
+
+ /* Border Radius */
+ --radius-sm: 0.375rem;
+ --radius-md: 0.5rem;
+ --radius-lg: 1rem;
+ --radius-xl: 1.5rem;
+
+ /* Transitions */
+ --transition: all 0.3s ease;
+}
+
+/* Dark Mode */
+[data-theme="dark"] {
+ --bg-primary: #1a1a1a;
+ --bg-secondary: #2d2d2d;
+ --bg-tertiary: #3a3a3a;
+ --text-primary: #f8f9fa;
+ --text-secondary: #adb5bd;
+ --border-color: #495057;
+ --shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
+ --shadow-lg: 0 10px 30px rgba(0, 0, 0, 0.5);
+}
+
+/* Reset & Base Styles */
+* {
+ margin: 0;
+ padding: 0;
+ box-sizing: border-box;
+}
+
+body {
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Cairo', 'Arial', sans-serif;
+ background: var(--bg-primary);
+ color: var(--text-primary);
+ line-height: 1.6;
+ transition: var(--transition);
+}
+
+/* RTL Support */
+[dir="rtl"] {
+ direction: rtl;
+}
+
+[dir="ltr"] {
+ direction: ltr;
+}
+
+/* Header / الترويسة */
+.header {
+ background: var(--bg-secondary);
+ border-bottom: 1px solid var(--border-color);
+ box-shadow: var(--shadow);
+ position: sticky;
+ top: 0;
+ z-index: 100;
+}
+
+.header-content {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: var(--spacing-sm) 0;
+}
+
+.logo {
+ display: flex;
+ align-items: center;
+ gap: var(--spacing-sm);
+}
+
+.logo-icon {
+ font-size: 2rem;
+}
+
+.logo-text {
+ font-size: 1.5rem;
+ font-weight: 700;
+ background: var(--accent-gradient);
+ -webkit-background-clip: text;
+ -webkit-text-fill-color: transparent;
+ background-clip: text;
+}
+
+.header-actions {
+ display: flex;
+ gap: var(--spacing-sm);
+}
+
+/* Container */
+.container {
+ max-width: 1400px;
+ margin: 0 auto;
+ padding: 0 var(--spacing-md);
+}
+
+.main-container {
+ display: grid;
+ grid-template-columns: 250px 1fr;
+ gap: var(--spacing-lg);
+ padding-top: var(--spacing-lg);
+ padding-bottom: var(--spacing-lg);
+ min-height: calc(100vh - 80px);
+}
+
+/* Sidebar / الشريط الجانبي */
+.sidebar {
+ background: var(--bg-secondary);
+ border-radius: var(--radius-lg);
+ padding: var(--spacing-md);
+ box-shadow: var(--shadow);
+ height: fit-content;
+ position: sticky;
+ top: 100px;
+}
+
+.sidebar-nav {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-xs);
+}
+
+.nav-item {
+ display: flex;
+ align-items: center;
+ gap: var(--spacing-sm);
+ padding: var(--spacing-sm);
+ border: none;
+ background: transparent;
+ color: var(--text-secondary);
+ font-size: 1rem;
+ border-radius: var(--radius-md);
+ cursor: pointer;
+ transition: var(--transition);
+ text-align: start;
+}
+
+.nav-item:hover {
+ background: var(--bg-tertiary);
+ color: var(--text-primary);
+}
+
+.nav-item.active {
+ background: var(--accent-gradient);
+ color: white;
+}
+
+.nav-item .icon {
+ font-size: 1.25rem;
+}
+
+.sidebar-footer {
+ margin-top: var(--spacing-lg);
+ padding-top: var(--spacing-md);
+ border-top: 1px solid var(--border-color);
+}
+
+.status-indicator {
+ display: flex;
+ align-items: center;
+ gap: var(--spacing-xs);
+ font-size: 0.875rem;
+ color: var(--text-secondary);
+}
+
+.status-dot {
+ width: 8px;
+ height: 8px;
+ border-radius: 50%;
+ background: var(--error);
+ animation: pulse 2s infinite;
+}
+
+.status-dot.connected {
+ background: var(--success);
+}
+
+@keyframes pulse {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0.5; }
+}
+
+/* Content Area */
+.content {
+ background: var(--bg-secondary);
+ border-radius: var(--radius-lg);
+ box-shadow: var(--shadow);
+ min-height: 600px;
+}
+
+.tab-content {
+ display: none;
+ animation: fadeIn 0.3s ease;
+}
+
+.tab-content.active {
+ display: block;
+}
+
+@keyframes fadeIn {
+ from { opacity: 0; transform: translateY(10px); }
+ to { opacity: 1; transform: translateY(0); }
+}
+
+/* Panel */
+.panel {
+ padding: var(--spacing-lg);
+}
+
+.panel h2 {
+ margin-bottom: var(--spacing-sm);
+ color: var(--text-primary);
+}
+
+.panel p {
+ color: var(--text-secondary);
+ margin-bottom: var(--spacing-lg);
+}
+
+/* Chat Interface */
+.chat-container {
+ display: flex;
+ flex-direction: column;
+ height: calc(100vh - 200px);
+}
+
+.messages {
+ flex: 1;
+ overflow-y: auto;
+ padding: var(--spacing-lg);
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-md);
+}
+
+.welcome-message {
+ text-align: center;
+ padding: var(--spacing-xl);
+}
+
+.welcome-message h2 {
+ margin-bottom: var(--spacing-sm);
+ background: var(--accent-gradient);
+ -webkit-background-clip: text;
+ -webkit-text-fill-color: transparent;
+ background-clip: text;
+}
+
+.quick-actions {
+ display: flex;
+ gap: var(--spacing-sm);
+ justify-content: center;
+ flex-wrap: wrap;
+ margin-top: var(--spacing-lg);
+}
+
+.quick-action {
+ padding: var(--spacing-sm) var(--spacing-md);
+ border: 1px solid var(--border-color);
+ background: var(--bg-tertiary);
+ border-radius: var(--radius-lg);
+ cursor: pointer;
+ transition: var(--transition);
+ font-size: 0.9rem;
+}
+
+.quick-action:hover {
+ background: var(--accent-gradient);
+ color: white;
+ border-color: transparent;
+ transform: translateY(-2px);
+}
+
+.message {
+ display: flex;
+ gap: var(--spacing-sm);
+ max-width: 80%;
+ animation: slideIn 0.3s ease;
+}
+
+@keyframes slideIn {
+ from { opacity: 0; transform: translateX(20px); }
+ to { opacity: 1; transform: translateX(0); }
+}
+
+.message.user {
+ align-self: flex-end;
+ flex-direction: row-reverse;
+}
+
+.message-avatar {
+ width: 40px;
+ height: 40px;
+ border-radius: 50%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 1.5rem;
+ flex-shrink: 0;
+}
+
+.message.user .message-avatar {
+ background: var(--accent-gradient);
+}
+
+.message.agent .message-avatar {
+ background: var(--bg-tertiary);
+}
+
+.message-content {
+ background: var(--bg-tertiary);
+ padding: var(--spacing-sm) var(--spacing-md);
+ border-radius: var(--radius-lg);
+ word-wrap: break-word;
+}
+
+.message.user .message-content {
+ background: var(--accent-gradient);
+ color: white;
+}
+
+.chat-input-container {
+ padding: var(--spacing-md);
+ border-top: 1px solid var(--border-color);
+ display: flex;
+ gap: var(--spacing-sm);
+}
+
+.chat-input {
+ flex: 1;
+ padding: var(--spacing-sm);
+ border: 1px solid var(--border-color);
+ border-radius: var(--radius-md);
+ background: var(--bg-primary);
+ color: var(--text-primary);
+ resize: vertical;
+ font-family: inherit;
+}
+
+/* Buttons */
+.btn-send, .btn-primary, .btn-secondary, .btn-danger, .btn-icon {
+ padding: var(--spacing-sm) var(--spacing-md);
+ border: none;
+ border-radius: var(--radius-md);
+ cursor: pointer;
+ font-size: 1rem;
+ transition: var(--transition);
+ display: flex;
+ align-items: center;
+ gap: var(--spacing-xs);
+ font-weight: 500;
+}
+
+.btn-primary, .btn-send {
+ background: var(--accent-gradient);
+ color: white;
+}
+
+.btn-primary:hover, .btn-send:hover {
+ transform: translateY(-2px);
+ box-shadow: var(--shadow-lg);
+}
+
+.btn-secondary {
+ background: var(--bg-tertiary);
+ color: var(--text-primary);
+}
+
+.btn-secondary:hover {
+ background: var(--border-color);
+}
+
+.btn-danger {
+ background: var(--error);
+ color: white;
+}
+
+.btn-danger:hover {
+ background: #dc2626;
+}
+
+.btn-icon {
+ padding: var(--spacing-xs);
+ background: transparent;
+ color: var(--text-secondary);
+}
+
+.btn-icon:hover {
+ background: var(--bg-tertiary);
+ color: var(--text-primary);
+}
+
+/* Forms */
+.command-input, .text-input, .select-input {
+ width: 100%;
+ padding: var(--spacing-sm);
+ border: 1px solid var(--border-color);
+ border-radius: var(--radius-md);
+ background: var(--bg-primary);
+ color: var(--text-primary);
+ font-family: inherit;
+ font-size: 1rem;
+ margin-bottom: var(--spacing-md);
+}
+
+.command-input {
+ resize: vertical;
+ min-height: 120px;
+ font-family: 'Courier New', monospace;
+}
+
+.form-group {
+ margin-bottom: var(--spacing-md);
+}
+
+.form-group label {
+ display: block;
+ margin-bottom: var(--spacing-xs);
+ color: var(--text-secondary);
+ font-weight: 500;
+}
+
+.file-input-group {
+ display: flex;
+ gap: var(--spacing-sm);
+ margin-bottom: var(--spacing-md);
+}
+
+.file-input {
+ display: none;
+}
+
+/* Results */
+.result-container {
+ margin-top: var(--spacing-lg);
+ padding: var(--spacing-md);
+ background: var(--bg-primary);
+ border-radius: var(--radius-md);
+ border: 1px solid var(--border-color);
+ display: none;
+}
+
+.result-container.show {
+ display: block;
+ animation: fadeIn 0.3s ease;
+}
+
+.result-container pre {
+ background: var(--bg-tertiary);
+ padding: var(--spacing-md);
+ border-radius: var(--radius-sm);
+ overflow-x: auto;
+ font-size: 0.9rem;
+}
+
+/* Models List */
+.models-list {
+ margin-top: var(--spacing-md);
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
+ gap: var(--spacing-md);
+}
+
+.model-card {
+ padding: var(--spacing-md);
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ border-radius: var(--radius-md);
+ transition: var(--transition);
+}
+
+.model-card:hover {
+ border-color: var(--accent-primary);
+ transform: translateY(-2px);
+ box-shadow: var(--shadow);
+}
+
+.model-card h3 {
+ margin-bottom: var(--spacing-xs);
+ color: var(--accent-primary);
+}
+
+/* History */
+.history-actions {
+ display: flex;
+ gap: var(--spacing-sm);
+ margin-bottom: var(--spacing-md);
+}
+
+.history-list {
+ display: flex;
+ flex-direction: column;
+ gap: var(--spacing-sm);
+}
+
+.history-item {
+ padding: var(--spacing-md);
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ border-radius: var(--radius-md);
+ cursor: pointer;
+ transition: var(--transition);
+}
+
+.history-item:hover {
+ border-color: var(--accent-primary);
+ box-shadow: var(--shadow);
+}
+
+/* Modal */
+.modal {
+ display: none;
+ position: fixed;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background: rgba(0, 0, 0, 0.5);
+ z-index: 1000;
+ align-items: center;
+ justify-content: center;
+}
+
+.modal.show {
+ display: flex;
+}
+
+.modal-content {
+ background: var(--bg-secondary);
+ border-radius: var(--radius-lg);
+ max-width: 500px;
+ width: 90%;
+ max-height: 90vh;
+ overflow-y: auto;
+ box-shadow: var(--shadow-lg);
+ animation: modalSlide 0.3s ease;
+}
+
+@keyframes modalSlide {
+ from { transform: translateY(-50px); opacity: 0; }
+ to { transform: translateY(0); opacity: 1; }
+}
+
+.modal-header {
+ padding: var(--spacing-md);
+ border-bottom: 1px solid var(--border-color);
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+}
+
+.modal-close {
+ background: none;
+ border: none;
+ font-size: 2rem;
+ cursor: pointer;
+ color: var(--text-secondary);
+ line-height: 1;
+}
+
+.modal-body {
+ padding: var(--spacing-lg);
+}
+
+.modal-footer {
+ padding: var(--spacing-md);
+ border-top: 1px solid var(--border-color);
+ display: flex;
+ justify-content: flex-end;
+ gap: var(--spacing-sm);
+}
+
+/* Loading Spinner */
+.loading {
+ display: inline-block;
+ width: 20px;
+ height: 20px;
+ border: 3px solid var(--border-color);
+ border-top-color: var(--accent-primary);
+ border-radius: 50%;
+ animation: spin 1s linear infinite;
+}
+
+@keyframes spin {
+ to { transform: rotate(360deg); }
+}
+
+/* Responsive Design */
+@media (max-width: 768px) {
+ .main-container {
+ grid-template-columns: 1fr;
+ }
+
+ .sidebar {
+ position: static;
+ margin-bottom: var(--spacing-md);
+ }
+
+ .sidebar-nav {
+ flex-direction: row;
+ overflow-x: auto;
+ gap: var(--spacing-xs);
+ }
+
+ .nav-item .text {
+ display: none;
+ }
+
+ .message {
+ max-width: 90%;
+ }
+
+ .models-list {
+ grid-template-columns: 1fr;
+ }
+}
+
+/* Scrollbar Styling */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: var(--bg-tertiary);
+}
+
+::-webkit-scrollbar-thumb {
+ background: var(--border-color);
+ border-radius: var(--radius-sm);
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: var(--text-secondary);
+}
From 44d4421f6a18ac02356df558c9af74497b2625ec Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:25:53 +0000
Subject: [PATCH 4/7] Add documentation, Docker support, and update README for
Supreme Agent
Co-authored-by: wasalstor-web <230709381+wasalstor-web@users.noreply.github.com>
---
Dockerfile | 57 +++++
README.md | 297 +++++++++++++++++++++-
docker-compose.yml | 69 ++++++
docker-entrypoint.sh | 67 +++++
docs/API.md | 577 +++++++++++++++++++++++++++++++++++++++++++
docs/MODELS.md | 433 ++++++++++++++++++++++++++++++++
6 files changed, 1497 insertions(+), 3 deletions(-)
create mode 100644 Dockerfile
create mode 100644 docker-compose.yml
create mode 100755 docker-entrypoint.sh
create mode 100644 docs/API.md
create mode 100644 docs/MODELS.md
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..107cbad
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,57 @@
+FROM python:3.11-slim
+
+# Metadata
+LABEL maintainer="wasalstor-web"
+LABEL description="Supreme Agent - Integrated AI Agent Platform"
+LABEL version="1.0.0"
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies (curl is also required by the HEALTHCHECK below)
+RUN apt-get update && apt-get install -y \
+    curl \
+    bash \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Ollama
+# NOTE(review): piping a remote script into sh executes unpinned, unverified
+# code at build time — consider pinning a release or verifying a checksum.
+# Also confirm the URL: ollama.ai currently redirects to ollama.com.
+RUN curl -fsSL https://ollama.ai/install.sh | sh
+
+# Copy project files
+COPY . /app/
+
+# Install Python dependencies
+RUN pip install --no-cache-dir \
+    requests \
+    flask \
+    flask-cors
+
+# Create directories (log/data targets for the entrypoint and the agent)
+RUN mkdir -p /app/logs /app/data
+
+# Make scripts executable
+# NOTE(review): these globs fail the build if either directory has no
+# matching files — confirm scripts/*.sh and scripts/*.py always exist.
+RUN chmod +x /app/scripts/*.sh /app/scripts/*.py /app/api/server.py
+
+# Expose ports
+# 5000 - API Server
+# 8080 - Web Interface
+# 11434 - Ollama
+# 3000 - OpenWebUI (optional)
+EXPOSE 5000 8080 11434 3000
+
+# Environment variables (defaults consumed by api/server.py and the entrypoint)
+ENV OLLAMA_HOST=http://localhost:11434
+ENV API_HOST=0.0.0.0
+ENV API_PORT=5000
+ENV WEB_PORT=8080
+
+# Health check (start-period covers Ollama's slow first boot)
+HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
+    CMD curl -f http://localhost:5000/api/health || exit 1
+
+# Start script (copied to PATH so the ENTRYPOINT needs no absolute path)
+COPY docker-entrypoint.sh /usr/local/bin/
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+# Default command: run the API server ("web" and "all" are also accepted)
+ENTRYPOINT ["docker-entrypoint.sh"]
+CMD ["api"]
diff --git a/README.md b/README.md
index 7d86b6d..dbca06d 100644
--- a/README.md
+++ b/README.md
@@ -1,17 +1,179 @@
# AI-Agent-Platform
-An AI Agent Platform infrastructure project with automated finalization capabilities and OpenWebUI integration.
+An AI Agent Platform infrastructure project with **Supreme Agent** - a comprehensive, bilingual AI assistant with advanced capabilities for chat, command execution, file analysis, and code generation.
## 🚀 Quick Start
-**The platform is live and accessible at:**
+### Online Demo
**🌐 [https://wasalstor-web.github.io/AI-Agent-Platform/](https://wasalstor-web.github.io/AI-Agent-Platform/)**
+### One-Command Installation
+```bash
+./scripts/quick-start.sh
+```
+
+This will install Supreme Agent, start the API server, and launch the web interface.
+
For complete deployment information, see **[DEPLOYMENT.md](DEPLOYMENT.md)**.
+## 🤖 Supreme Agent
+
+Supreme Agent (الوكيل الأعلى) is an integrated AI system that combines multiple capabilities:
+
+- 💬 **Intelligent Chat**: Natural bilingual (Arabic/English) conversations
+- ⚡ **Command Execution**: Execute any task or command
+- 📊 **File Analysis**: Comprehensive file and code analysis
+- 💻 **Code Generation**: Generate code in any programming language
+- 🎯 **Model Management**: Support for multiple AI models (llama3, aya, mistral, deepseek-coder, qwen2)
+- 🌐 **Modern Web UI**: Beautiful, responsive interface with dark/light themes
+- 🔗 **OpenWebUI Integration**: Seamless integration with OpenWebUI
+- 🐳 **Docker Support**: Easy deployment with Docker and Docker Compose
+
+### Quick Usage
+
+```bash
+# Chat with the agent
+supreme-agent chat "مرحباً! كيف يمكنني مساعدتك؟"
+
+# Execute a command
+supreme-agent execute "Create a Python script for data processing"
+
+# Analyze a file
+supreme-agent analyze-file script.py
+
+# Generate code
+supreme-agent generate-code "A REST API for user management" --lang python
+
+# Check system health
+supreme-agent health
+
+# List available models
+supreme-agent models
+```
+
+### Web Interface
+
+Open the modern web interface:
+```bash
+# Navigate to web directory
+cd web
+
+# Start web server
+python3 -m http.server 8080
+
+# Open http://localhost:8080 in your browser
+```
+
+The web interface includes:
+- Interactive chat with quick actions
+- Command execution panel
+- File analysis with upload support
+- Code generation with syntax highlighting
+- Model management dashboard
+- Conversation history with export
+- Settings with theme and language toggle
+
+### API Server
+
+Start the API server:
+```bash
+python3 api/server.py
+```
+
+Access the API at:
+- Main: http://localhost:5000
+- Docs: http://localhost:5000/api/docs
+- Health: http://localhost:5000/api/health
+
+See [docs/API.md](docs/API.md) for complete API documentation.
+
+### Docker Deployment
+
+```bash
+# Build and run with Docker Compose
+docker-compose up -d
+
+# With OpenWebUI
+docker-compose --profile with-openwebui up -d
+
+# Access services:
+# - Supreme Agent API: http://localhost:5000
+# - Web Interface: http://localhost:8080
+# - OpenWebUI: http://localhost:3000
+```
+
## Overview
-This project provides a platform for building, deploying, and managing AI agents with built-in project lifecycle management tools and OpenWebUI integration for running large language models.
+This project provides a comprehensive platform for building, deploying, and managing AI agents with built-in project lifecycle management tools and OpenWebUI integration for running large language models.
+
+## 🎯 Supreme Agent Features
+
+### Core Capabilities
+
+1. **Bilingual AI Assistant** (عربي/English)
+ - Natural conversations in Arabic and English
+ - Context-aware responses
+ - Continuous learning from interactions
+
+2. **Command Execution**
+ - Execute complex tasks and commands
+ - Generate scripts and automation tools
+ - Problem-solving assistance
+
+3. **File Analysis**
+ - Comprehensive code analysis
+ - Documentation review
+ - Security scanning
+ - Performance recommendations
+
+4. **Code Generation**
+ - Support for 20+ programming languages
+ - Clean, documented, production-ready code
+ - Best practices implementation
+ - Error handling and validation
+
+5. **Model Management**
+ - Multiple AI model support
+ - Custom model creation
+ - Model comparison and selection
+ - Performance optimization
+
+### Supported Models
+
+- **supreme-executor**: Custom bilingual model (Arabic/English) ⭐
+- **llama3**: General-purpose foundation model
+- **aya**: Multilingual specialist with excellent Arabic support
+- **mistral**: Fast and efficient model
+- **deepseek-coder**: Programming specialist
+- **qwen2**: Advanced model with long context
+
+See [docs/MODELS.md](docs/MODELS.md) for detailed model comparison and usage guide.
+
+### Architecture
+
+```
+┌─────────────────────────────────────────────────┐
+│ Supreme Agent │
+│ │
+│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
+│ │ Web │ │ API │ │ CLI Tool │ │
+│ │ UI │ │ Server │ │ │ │
+│ └────┬─────┘ └────┬─────┘ └────┬─────┘ │
+│ │ │ │ │
+│ └─────────────┴─────────────┘ │
+│ │ │
+│ ┌────────▼────────┐ │
+│ │ Supreme Agent │ │
+│ │ Core Class │ │
+│ └────────┬────────┘ │
+│ │ │
+│ ┌────────▼────────┐ │
+│ │ Ollama │ │
+│ │ (AI Models) │ │
+│ └─────────────────┘ │
+│ │
+└─────────────────────────────────────────────────┘
+```
## Web Interface
@@ -55,12 +217,141 @@ The web interface includes:
## Deployment Status
+✅ **Supreme Agent has been successfully integrated**
✅ **OpenWebUI has been successfully added and integrated**
✅ **Project is deployed and accessible via GitHub Pages**
✅ **Temporary domain active:** https://wasalstor-web.github.io/AI-Agent-Platform/
📖 **For complete deployment information, see [DEPLOYMENT.md](DEPLOYMENT.md)**
+## Installation
+
+### Prerequisites
+
+- Ubuntu/Debian-based system (or macOS/Windows with WSL)
+- Python 3.8 or higher
+- 8GB+ RAM recommended
+- 20GB+ free disk space
+- Internet connection for model downloads
+
+### Quick Installation
+
+#### Method 1: One-Command Setup (Recommended)
+
+```bash
+./scripts/quick-start.sh
+```
+
+This script will:
+1. Install Ollama
+2. Download required AI models
+3. Create the supreme-executor custom model
+4. Install Python dependencies
+5. Start the API server
+6. Launch the web interface
+
+#### Method 2: Step-by-Step Installation
+
+```bash
+# 1. Install Supreme Agent
+./scripts/install-supreme-agent.sh
+
+# 2. Start API Server
+python3 api/server.py &
+
+# 3. Start Web Interface
+cd web && python3 -m http.server 8080 &
+
+# 4. (Optional) Integrate with OpenWebUI
+./scripts/integrate-openwebui.sh
+```
+
+#### Method 3: Docker Installation
+
+```bash
+# Build and run
+docker-compose up -d
+
+# Or with OpenWebUI
+docker-compose --profile with-openwebui up -d
+```
+
+### Manual Installation
+
+```bash
+# 1. Install Ollama
+curl -fsSL https://ollama.ai/install.sh | sh
+
+# 2. Start Ollama
+ollama serve &
+
+# 3. Pull models
+ollama pull llama3
+ollama pull aya
+ollama pull mistral
+ollama pull deepseek-coder
+ollama pull qwen2
+
+# 4. Create custom model
+cd models
+ollama create supreme-executor -f Modelfile
+
+# 5. Install Python dependencies
+pip3 install requests flask flask-cors
+
+# 6. Create supreme-agent command
+sudo ln -s $(pwd)/scripts/supreme_agent.py /usr/local/bin/supreme-agent
+sudo chmod +x /usr/local/bin/supreme-agent
+
+# 7. Start the API server
+python3 api/server.py &
+
+# 8. Start the web interface
+cd web && python3 -m http.server 8080 &
+```
+
+### Verify Installation
+
+```bash
+# Check health
+supreme-agent health
+
+# Test chat
+supreme-agent chat "Hello!"
+
+# List models
+supreme-agent models
+
+# Check API
+curl http://localhost:5000/api/health
+
+# Open web interface
+# Navigate to http://localhost:8080
+```
+
+## Documentation
+
+### Core Documentation
+
+- **[README.md](README.md)** - This file (main documentation)
+- **[docs/API.md](docs/API.md)** - Complete API reference with examples
+- **[docs/MODELS.md](docs/MODELS.md)** - AI models guide and comparison
+- **[DEPLOYMENT.md](DEPLOYMENT.md)** - Deployment guide
+- **[OPENWEBUI.md](OPENWEBUI.md)** - OpenWebUI integration guide
+
+### Configuration Files
+
+- **config/settings.json** - Main configuration
+- **.env.example** - Environment variables template
+- **models/Modelfile** - Custom model definition
+
+### Scripts
+
+- **scripts/install-supreme-agent.sh** - Installation script
+- **scripts/quick-start.sh** - One-command setup
+- **scripts/integrate-openwebui.sh** - OpenWebUI integration
+- **setup-openwebui.sh** - OpenWebUI setup
+
## Project Finalization
The platform includes automated scripts for finalizing projects with proper resource cleanup and archival.
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..6bfd3e2
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,69 @@
+version: '3.8'
+
+services:
+ # Supreme Agent API Service
+ supreme-agent:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ container_name: supreme-agent-api
+ restart: unless-stopped
+ ports:
+ - "5000:5000" # API Server
+ - "8080:8080" # Web Interface
+ - "11434:11434" # Ollama
+ environment:
+ - OLLAMA_HOST=http://localhost:11434
+ - API_HOST=0.0.0.0
+ - API_PORT=5000
+ - WEB_PORT=8080
+ - SUPREME_MODEL=supreme-executor
+ volumes:
+ - ./models:/app/models
+ - ollama_data:/root/.ollama
+ - agent_logs:/app/logs
+ - agent_data:/app/data
+ networks:
+ - supreme-network
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:5000/api/health"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 40s
+
+ # OpenWebUI (Optional)
+ openwebui:
+ image: ghcr.io/open-webui/open-webui:latest
+ container_name: supreme-openwebui
+ restart: unless-stopped
+ ports:
+ - "3000:8080"
+ environment:
+ - OLLAMA_API_BASE_URL=http://supreme-agent:11434
+ - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-change-me-to-a-secure-key}
+ - WEBUI_AUTH=true
+ volumes:
+ - openwebui_data:/app/backend/data
+ networks:
+ - supreme-network
+ depends_on:
+ - supreme-agent
+ profiles:
+ - with-openwebui
+
+# Volumes
+volumes:
+ ollama_data:
+ driver: local
+ openwebui_data:
+ driver: local
+ agent_logs:
+ driver: local
+ agent_data:
+ driver: local
+
+# Networks
+networks:
+ supreme-network:
+ driver: bridge
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh
new file mode 100755
index 0000000..5e3c53c
--- /dev/null
+++ b/docker-entrypoint.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+set -e
+
+# Colors
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+NC='\033[0m'
+
+echo -e "${BLUE}Starting Supreme Agent...${NC}"
+
+# Start Ollama in background
+echo -e "${GREEN}Starting Ollama service...${NC}"
+nohup ollama serve > /app/logs/ollama.log 2>&1 &
+sleep 5
+
+# Wait for Ollama to be ready
+echo -e "${GREEN}Waiting for Ollama...${NC}"
+timeout=30
+counter=0
+until curl -s http://localhost:11434/api/tags > /dev/null; do
+ sleep 1
+ counter=$((counter + 1))
+ if [ $counter -ge $timeout ]; then
+ echo "Ollama failed to start"
+ exit 1
+ fi
+done
+echo -e "${GREEN}Ollama is ready!${NC}"
+
+# Pull models if needed
+if [ "$PULL_MODELS" = "true" ]; then
+ echo -e "${GREEN}Pulling models...${NC}"
+ ollama pull llama3 || true
+ ollama pull aya || true
+ ollama pull mistral || true
+fi
+
+# Create custom model if Modelfile exists
+if [ -f "/app/models/Modelfile" ]; then
+ echo -e "${GREEN}Creating supreme-executor model...${NC}"
+ ollama create supreme-executor -f /app/models/Modelfile || true
+fi
+
+# Start based on command
+case "$1" in
+ api)
+ echo -e "${GREEN}Starting API Server...${NC}"
+ cd /app
+ python3 api/server.py
+ ;;
+ web)
+ echo -e "${GREEN}Starting Web Server...${NC}"
+ cd /app/web
+ python3 -m http.server $WEB_PORT
+ ;;
+ all)
+ echo -e "${GREEN}Starting all services...${NC}"
+ cd /app
+ python3 api/server.py &
+ cd /app/web
+ python3 -m http.server $WEB_PORT
+ ;;
+ *)
+ echo "Usage: $0 {api|web|all}"
+ exit 1
+ ;;
+esac
diff --git a/docs/API.md b/docs/API.md
new file mode 100644
index 0000000..e45b412
--- /dev/null
+++ b/docs/API.md
@@ -0,0 +1,577 @@
+# Supreme Agent API Documentation
+# توثيق واجهة برمجة التطبيقات للوكيل الأعلى
+
+## Overview / نظرة عامة
+
+Supreme Agent API is a RESTful API that provides access to advanced AI capabilities including chat, command execution, file analysis, and code generation.
+
+واجهة برمجة التطبيقات للوكيل الأعلى هي API RESTful توفر الوصول إلى قدرات الذكاء الاصطناعي المتقدمة بما في ذلك المحادثة وتنفيذ الأوامر وتحليل الملفات وتوليد الأكواد.
+
+## Base URL / عنوان الأساس
+
+```
+http://localhost:5000
+```
+
+## Authentication / المصادقة
+
+Currently, the API does not require authentication. You can enable API key authentication in `config/settings.json`.
+
+حالياً، لا تتطلب واجهة برمجة التطبيقات مصادقة. يمكنك تفعيل مفتاح API في `config/settings.json`.
+
+## Endpoints / نقاط النهاية
+
+### 1. Health Check / فحص الصحة
+
+Check the health status of the API server and Ollama connection.
+
+**Endpoint:**
+```
+GET /api/health
+```
+
+**Response / الاستجابة:**
+```json
+{
+ "status": "healthy",
+ "ollama_connected": true,
+ "model_available": true,
+ "current_model": "supreme-executor",
+ "available_models": ["supreme-executor", "llama3", "aya", "mistral"],
+ "conversation_history_size": 10,
+ "timestamp": "2025-10-20T07:10:50.282Z"
+}
+```
+
+**cURL Example:**
+```bash
+curl http://localhost:5000/api/health
+```
+
+**Python Example:**
+```python
+import requests
+
+response = requests.get('http://localhost:5000/api/health')
+print(response.json())
+```
+
+**JavaScript Example:**
+```javascript
+fetch('http://localhost:5000/api/health')
+ .then(response => response.json())
+ .then(data => console.log(data));
+```
+
+---
+
+### 2. Chat / المحادثة
+
+Send a message to the agent and receive an intelligent response.
+
+**Endpoint:**
+```
+POST /api/chat
+```
+
+**Request Body / طلب:**
+```json
+{
+ "message": "مرحباً، كيف يمكنني مساعدتك؟",
+ "context": "نتحدث عن البرمجة" // optional
+}
+```
+
+**Response / الاستجابة:**
+```json
+{
+ "success": true,
+ "response": "مرحباً! أنا هنا لمساعدتك في أي شيء تحتاجه. كيف يمكنني مساعدتك اليوم؟",
+ "timestamp": "2025-10-20T07:10:50.282Z"
+}
+```
+
+**cURL Example:**
+```bash
+curl -X POST http://localhost:5000/api/chat \
+ -H "Content-Type: application/json" \
+ -d '{
+ "message": "Hello! How are you?",
+ "context": "We are discussing programming"
+ }'
+```
+
+**Python Example:**
+```python
+import requests
+
+data = {
+ "message": "اكتب لي برنامج بايثون بسيط",
+ "context": "نحتاج برنامج حاسبة"
+}
+
+response = requests.post('http://localhost:5000/api/chat', json=data)
+print(response.json()['response'])
+```
+
+**JavaScript Example:**
+```javascript
+const data = {
+ message: "What is Python?",
+ context: "Learning programming"
+};
+
+fetch('http://localhost:5000/api/chat', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify(data)
+})
+ .then(response => response.json())
+ .then(data => console.log(data.response));
+```
+
+---
+
+### 3. Execute Command / تنفيذ أمر
+
+Execute a command or task and get the result.
+
+**Endpoint:**
+```
+POST /api/execute
+```
+
+**Request Body / طلب:**
+```json
+{
+ "command": "اكتب سكريبت bash لنسخ الملفات"
+}
+```
+
+**Response / الاستجابة:**
+```json
+{
+ "success": true,
+ "response": "#!/bin/bash\n# Script to copy files\n\nsource_dir=\"$1\"\ndest_dir=\"$2\"\n\nif [ -z \"$source_dir\" ] || [ -z \"$dest_dir\" ]; then\n echo \"Usage: $0 <source_dir> <dest_dir>\"\n exit 1\nfi\n\ncp -r \"$source_dir\"/* \"$dest_dir\"/\necho \"Files copied successfully!\"",
+ "timestamp": "2025-10-20T07:10:50.282Z"
+}
+```
+
+**cURL Example:**
+```bash
+curl -X POST http://localhost:5000/api/execute \
+ -H "Content-Type: application/json" \
+ -d '{
+ "command": "Create a Python script to read CSV files"
+ }'
+```
+
+**Python Example:**
+```python
+import requests
+
+data = {
+ "command": "اكتب دالة لحساب المتوسط الحسابي"
+}
+
+response = requests.post('http://localhost:5000/api/execute', json=data)
+print(response.json()['response'])
+```
+
+---
+
+### 4. Analyze File / تحليل ملف
+
+Analyze a file and get comprehensive insights.
+
+**Endpoint:**
+```
+POST /api/analyze
+```
+
+**Request Body / طلب:**
+```json
+{
+ "filepath": "/path/to/file.py"
+}
+```
+
+**Response / الاستجابة:**
+```json
+{
+ "success": true,
+ "filename": "file.py",
+ "filepath": "/path/to/file.py",
+ "size": 1024,
+ "extension": ".py",
+ "analysis": "هذا ملف Python يحتوي على...\nThis Python file contains...",
+ "timestamp": "2025-10-20T07:10:50.282Z"
+}
+```
+
+**cURL Example:**
+```bash
+curl -X POST http://localhost:5000/api/analyze \
+ -H "Content-Type: application/json" \
+ -d '{
+ "filepath": "/home/user/script.py"
+ }'
+```
+
+**Python Example:**
+```python
+import requests
+
+data = {
+ "filepath": "/path/to/myfile.js"
+}
+
+response = requests.post('http://localhost:5000/api/analyze', json=data)
+result = response.json()
+
+if result['success']:
+ print(f"File: {result['filename']}")
+ print(f"Size: {result['size']} bytes")
+ print(f"Analysis:\n{result['analysis']}")
+```
+
+---
+
+### 5. Generate Code / توليد كود
+
+Generate code based on a description in any programming language.
+
+**Endpoint:**
+```
+POST /api/generate-code
+```
+
+**Request Body / طلب:**
+```json
+{
+ "description": "برنامج حاسبة بسيط",
+ "language": "python"
+}
+```
+
+**Response / الاستجابة:**
+```json
+{
+ "success": true,
+ "code": "# Simple Calculator Program\n\ndef add(x, y):\n return x + y\n\ndef subtract(x, y):\n return x - y\n\n# ... more code",
+ "language": "python",
+ "timestamp": "2025-10-20T07:10:50.282Z"
+}
+```
+
+**Supported Languages / اللغات المدعومة:**
+- `python`
+- `javascript`
+- `java`
+- `cpp` (C++)
+- `csharp` (C#)
+- `go`
+- `rust`
+- `php`
+- `ruby`
+- `swift`
+
+**cURL Example:**
+```bash
+curl -X POST http://localhost:5000/api/generate-code \
+ -H "Content-Type: application/json" \
+ -d '{
+ "description": "A function to calculate factorial",
+ "language": "python"
+ }'
+```
+
+**Python Example:**
+```python
+import requests
+
+data = {
+ "description": "دالة لترتيب قائمة من الأرقام",
+ "language": "python"
+}
+
+response = requests.post('http://localhost:5000/api/generate-code', json=data)
+print(response.json()['code'])
+```
+
+**JavaScript Example:**
+```javascript
+const data = {
+ description: "A REST API endpoint for user authentication",
+ language: "javascript"
+};
+
+fetch('http://localhost:5000/api/generate-code', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify(data)
+})
+ .then(response => response.json())
+ .then(data => console.log(data.code));
+```
+
+---
+
+### 6. List Models / قائمة النماذج
+
+Get a list of all available AI models.
+
+**Endpoint:**
+```
+GET /api/models
+```
+
+**Response / الاستجابة:**
+```json
+{
+ "success": true,
+ "models": [
+ "supreme-executor",
+ "llama3",
+ "aya",
+ "mistral",
+ "deepseek-coder",
+ "qwen2"
+ ],
+ "current_model": "supreme-executor",
+ "timestamp": "2025-10-20T07:10:50.282Z"
+}
+```
+
+**cURL Example:**
+```bash
+curl http://localhost:5000/api/models
+```
+
+**Python Example:**
+```python
+import requests
+
+response = requests.get('http://localhost:5000/api/models')
+models = response.json()['models']
+
+print("Available models:")
+for model in models:
+ print(f" - {model}")
+```
+
+---
+
+## Error Handling / معالجة الأخطاء
+
+All endpoints return a consistent error format:
+
+```json
+{
+ "success": false,
+ "error": "Error message description"
+}
+```
+
+**HTTP Status Codes:**
+- `200` - Success / نجاح
+- `400` - Bad Request / طلب خاطئ
+- `404` - Not Found / غير موجود
+- `500` - Internal Server Error / خطأ في الخادم
+
+## Rate Limiting / تحديد المعدل
+
+Currently, there is no rate limiting. You can enable it in `config/settings.json`.
+
+حالياً، لا يوجد تحديد للمعدل. يمكنك تفعيله في `config/settings.json`.
+
+## CORS / مشاركة الموارد عبر المصادر
+
+CORS is enabled by default for all origins. You can configure allowed origins in `config/settings.json`.
+
+CORS مفعّل افتراضياً لجميع المصادر. يمكنك تكوين المصادر المسموحة في `config/settings.json`.
+
+## WebSocket Support / دعم WebSocket
+
+WebSocket support for streaming responses is planned for a future release.
+
+دعم WebSocket للاستجابات المتدفقة مخطط له في إصدار مستقبلي.
+
+## Best Practices / أفضل الممارسات
+
+1. **Error Handling / معالجة الأخطاء**
+ - Always check the `success` field in responses
+ - Handle network errors gracefully
+
+2. **Context Management / إدارة السياق**
+ - Use the `context` parameter in chat for better responses
+ - The agent maintains conversation history automatically
+
+3. **File Paths / مسارات الملفات**
+ - Use absolute paths for file analysis
+ - Ensure files are readable by the API server
+
+4. **Code Generation / توليد الأكواد**
+ - Be specific in your descriptions
+ - Mention any specific requirements or constraints
+
+## Examples / أمثلة
+
+### Complete Python Script / سكريبت بايثون كامل
+
+```python
+#!/usr/bin/env python3
+"""
+Supreme Agent API Client Example
+"""
+
+import requests
+import json
+
+class SupremeAgentClient:
+ def __init__(self, base_url='http://localhost:5000'):
+ self.base_url = base_url
+
+ def health_check(self):
+ """Check API health"""
+ response = requests.get(f'{self.base_url}/api/health')
+ return response.json()
+
+ def chat(self, message, context=None):
+ """Send a chat message"""
+ data = {'message': message}
+ if context:
+ data['context'] = context
+
+ response = requests.post(f'{self.base_url}/api/chat', json=data)
+ return response.json()
+
+ def execute(self, command):
+ """Execute a command"""
+ data = {'command': command}
+ response = requests.post(f'{self.base_url}/api/execute', json=data)
+ return response.json()
+
+ def analyze_file(self, filepath):
+ """Analyze a file"""
+ data = {'filepath': filepath}
+ response = requests.post(f'{self.base_url}/api/analyze', json=data)
+ return response.json()
+
+ def generate_code(self, description, language='python'):
+ """Generate code"""
+ data = {'description': description, 'language': language}
+ response = requests.post(f'{self.base_url}/api/generate-code', json=data)
+ return response.json()
+
+ def get_models(self):
+ """Get available models"""
+ response = requests.get(f'{self.base_url}/api/models')
+ return response.json()
+
+# Usage Example
+if __name__ == '__main__':
+ client = SupremeAgentClient()
+
+ # Health check
+ health = client.health_check()
+ print(f"Status: {health['status']}")
+
+ # Chat
+ chat_response = client.chat("مرحباً! كيف حالك؟")
+ print(f"Agent: {chat_response['response']}")
+
+ # Generate code
+ code = client.generate_code("برنامج حاسبة بسيط", "python")
+ print(f"Generated code:\n{code['code']}")
+```
+
+### Complete JavaScript Example / مثال JavaScript كامل
+
+```javascript
+/**
+ * Supreme Agent API Client
+ */
+class SupremeAgentClient {
+ constructor(baseUrl = 'http://localhost:5000') {
+ this.baseUrl = baseUrl;
+ }
+
+ async healthCheck() {
+ const response = await fetch(`${this.baseUrl}/api/health`);
+ return await response.json();
+ }
+
+ async chat(message, context = null) {
+ const data = { message };
+ if (context) data.context = context;
+
+ const response = await fetch(`${this.baseUrl}/api/chat`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify(data)
+ });
+ return await response.json();
+ }
+
+ async execute(command) {
+ const response = await fetch(`${this.baseUrl}/api/execute`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ command })
+ });
+ return await response.json();
+ }
+
+ async analyzeFile(filepath) {
+ const response = await fetch(`${this.baseUrl}/api/analyze`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ filepath })
+ });
+ return await response.json();
+ }
+
+ async generateCode(description, language = 'python') {
+ const response = await fetch(`${this.baseUrl}/api/generate-code`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ description, language })
+ });
+ return await response.json();
+ }
+
+ async getModels() {
+ const response = await fetch(`${this.baseUrl}/api/models`);
+ return await response.json();
+ }
+}
+
+// Usage
+const client = new SupremeAgentClient();
+
+// Health check
+client.healthCheck().then(health => {
+ console.log('Status:', health.status);
+});
+
+// Chat
+client.chat('Hello!').then(response => {
+ console.log('Agent:', response.response);
+});
+```
+
+## Support / الدعم
+
+For issues or questions:
+- GitHub Issues: https://github.com/wasalstor-web/AI-Agent-Platform/issues
+- Documentation: See README.md and docs/
+
+للأسئلة أو المشاكل:
+- GitHub Issues: https://github.com/wasalstor-web/AI-Agent-Platform/issues
+- التوثيق: انظر README.md و docs/
+
+---
+
+**Supreme Agent API Documentation** v1.0.0
+© 2025 wasalstor-web
diff --git a/docs/MODELS.md b/docs/MODELS.md
new file mode 100644
index 0000000..3a26114
--- /dev/null
+++ b/docs/MODELS.md
@@ -0,0 +1,433 @@
+# Supreme Agent Models Documentation
+# توثيق نماذج الوكيل الأعلى
+
+## Overview / نظرة عامة
+
+Supreme Agent supports multiple AI models, each optimized for different tasks. This guide helps you understand and choose the right model for your needs.
+
+الوكيل الأعلى يدعم نماذج ذكاء اصطناعي متعددة، كل منها محسّن لمهام مختلفة. هذا الدليل يساعدك على فهم واختيار النموذج المناسب لاحتياجاتك.
+
+## Available Models / النماذج المتاحة
+
+### 1. Supreme Executor (supreme-executor) ⭐
+
+**النموذج الرئيسي / Primary Model**
+
+This is the custom-built model specifically designed for Supreme Agent. It combines the best features of multiple models with enhanced bilingual (Arabic/English) support.
+
+هذا هو النموذج المخصص المصمم خصيصاً للوكيل الأعلى. يجمع أفضل ميزات النماذج المتعددة مع دعم ثنائي اللغة محسّن (عربي/إنجليزي).
+
+**Capabilities / القدرات:**
+- ✅ Bilingual chat (Arabic/English) / محادثة ثنائية اللغة
+- ✅ Command execution / تنفيذ الأوامر
+- ✅ File analysis / تحليل الملفات
+- ✅ Code generation / توليد الأكواد
+- ✅ Context understanding / فهم السياق
+- ✅ Creative problem-solving / حل المشكلات الإبداعي
+
+**Specifications / المواصفات:**
+- Base Model: llama3
+- Parameters: ~7B
+- Context Length: 4096 tokens
+- Temperature: 0.7 (default)
+- Languages: Arabic, English (primary)
+
+**Best For / الأفضل لـ:**
+- General-purpose tasks / مهام عامة
+- Bilingual conversations / محادثات ثنائية اللغة
+- Complex problem-solving / حل المشكلات المعقدة
+- Educational purposes / أغراض تعليمية
+
+**Example Usage / مثال الاستخدام:**
+```bash
+supreme-agent chat "اشرح لي مفهوم الذكاء الاصطناعي"
+supreme-agent execute "Create a Python web scraper"
+```
+
+---
+
+### 2. Llama 3 (llama3)
+
+**General Purpose Foundation Model / نموذج أساسي متعدد الأغراض**
+
+Meta's Llama 3 is a powerful open-source model with excellent general knowledge and reasoning capabilities.
+
+Llama 3 من Meta هو نموذج مفتوح المصدر قوي مع معرفة عامة ممتازة وقدرات استدلال.
+
+**Capabilities / القدرات:**
+- ✅ General knowledge / معرفة عامة
+- ✅ Reasoning / استدلال
+- ✅ Writing assistance / مساعدة في الكتابة
+- ✅ Question answering / الإجابة على الأسئلة
+- ⚠️ Limited Arabic support / دعم عربي محدود
+
+**Specifications / المواصفات:**
+- Parameters: 7B / 13B / 70B variants
+- Context Length: 8192 tokens
+- Languages: English (primary), others (limited)
+
+**Best For / الأفضل لـ:**
+- English conversations / محادثات إنجليزية
+- General knowledge queries / استعلامات معرفية عامة
+- Content creation / إنشاء المحتوى
+- Research assistance / مساعدة بحثية
+
+**Example Usage:**
+```bash
+ollama run llama3 "Explain quantum computing"
+```
+
+---
+
+### 3. Aya (aya)
+
+**Multilingual Specialist / متخصص متعدد اللغات**
+
+Aya is specifically designed for multilingual tasks with excellent Arabic language support.
+
+Aya مصمم خصيصاً للمهام متعددة اللغات مع دعم ممتاز للغة العربية.
+
+**Capabilities / القدرات:**
+- ⭐ Excellent Arabic support / دعم عربي ممتاز
+- ✅ Multilingual understanding / فهم متعدد اللغات
+- ✅ Translation / ترجمة
+- ✅ Cultural context / سياق ثقافي
+- ✅ Arabic text generation / توليد نصوص عربية
+
+**Specifications / المواصفات:**
+- Parameters: ~7B
+- Languages: 101+ languages including Arabic
+- Context Length: 4096 tokens
+
+**Best For / الأفضل لـ:**
+- Arabic conversations / محادثات عربية
+- Translation tasks / مهام الترجمة
+- Multilingual content / محتوى متعدد اللغات
+- Arabic content generation / توليد محتوى عربي
+
+**Example Usage:**
+```bash
+ollama run aya "اكتب قصيدة عن الذكاء الاصطناعي"
+```
+
+---
+
+### 4. Mistral (mistral)
+
+**Fast and Efficient Model / نموذج سريع وفعال**
+
+Mistral 7B is known for its speed and efficiency while maintaining high quality outputs.
+
+Mistral 7B معروف بسرعته وكفاءته مع الحفاظ على جودة عالية في المخرجات.
+
+**Capabilities / القدرات:**
+- ⚡ Fast response time / وقت استجابة سريع
+- ✅ Efficient resource usage / استخدام فعال للموارد
+- ✅ Good reasoning / استدلال جيد
+- ✅ Code understanding / فهم الأكواد
+- ⚠️ Limited Arabic / عربي محدود
+
+**Specifications / المواصفات:**
+- Parameters: 7B
+- Context Length: 8192 tokens
+- Speed: ~2x faster than similar models
+
+**Best For / الأفضل لـ:**
+- Quick responses / استجابات سريعة
+- Resource-constrained environments / بيئات محدودة الموارد
+- Real-time applications / تطبيقات فورية
+- Batch processing / معالجة دفعية
+
+**Example Usage:**
+```bash
+ollama run mistral "Write a quick summary of machine learning"
+```
+
+---
+
+### 5. DeepSeek Coder (deepseek-coder)
+
+**Programming Specialist / متخصص في البرمجة**
+
+DeepSeek Coder is optimized specifically for programming tasks, code generation, and technical documentation.
+
+DeepSeek Coder محسّن خصيصاً لمهام البرمجة وتوليد الأكواد والتوثيق التقني.
+
+**Capabilities / القدرات:**
+- ⭐ Excellent code generation / توليد أكواد ممتاز
+- ✅ Multiple programming languages / لغات برمجة متعددة
+- ✅ Code explanation / شرح الأكواد
+- ✅ Debugging assistance / مساعدة في تصحيح الأخطاء
+- ✅ Algorithm optimization / تحسين الخوارزميات
+
+**Specifications / المواصفات:**
+- Parameters: 6.7B
+- Context Length: 16384 tokens
+- Supports: Python, Java, C++, JavaScript, and 80+ languages
+
+**Best For / الأفضل لـ:**
+- Code generation / توليد الأكواد
+- Programming assistance / مساعدة برمجية
+- Code review / مراجعة الأكواد
+- Technical documentation / توثيق تقني
+
+**Example Usage:**
+```bash
+supreme-agent generate-code "Binary search algorithm" --lang python
+ollama run deepseek-coder "Optimize this Python function"
+```
+
+---
+
+### 6. Qwen2 (qwen2)
+
+**Advanced Chinese-English Model / نموذج صيني-إنجليزي متقدم**
+
+Qwen2 is an advanced model with strong performance in both English and Chinese, with some multilingual capabilities.
+
+Qwen2 هو نموذج متقدم بأداء قوي في الإنجليزية والصينية، مع بعض القدرات متعددة اللغات.
+
+**Capabilities / القدرات:**
+- ✅ Strong reasoning / استدلال قوي
+- ✅ Mathematical tasks / مهام رياضية
+- ✅ Code generation / توليد أكواد
+- ✅ Long context understanding / فهم سياق طويل
+- ⚠️ Limited Arabic / عربي محدود
+
+**Specifications / المواصفات:**
+- Parameters: 7B
+- Context Length: 32768 tokens (very long!)
+- Languages: English, Chinese (primary)
+
+**Best For / الأفضل لـ:**
+- Long document analysis / تحليل مستندات طويلة
+- Mathematical reasoning / استدلال رياضي
+- Complex problem-solving / حل مشكلات معقدة
+- Extended conversations / محادثات ممتدة
+
+**Example Usage:**
+```bash
+ollama run qwen2 "Solve this complex math problem"
+```
+
+---
+
+## Model Comparison / مقارنة النماذج
+
+| Model | Arabic Support | Speed | Code Gen | Context | Best Use Case |
+|-------|---------------|-------|----------|---------|---------------|
+| **supreme-executor** | ⭐⭐⭐ | ⭐⭐⭐ | ⭐⭐⭐ | 4K | All-purpose, Bilingual |
+| **aya** | ⭐⭐⭐ | ⭐⭐ | ⭐⭐ | 4K | Arabic conversations |
+| **llama3** | ⭐ | ⭐⭐ | ⭐⭐ | 8K | English general tasks |
+| **mistral** | ⭐ | ⭐⭐⭐ | ⭐⭐ | 8K | Fast responses |
+| **deepseek-coder** | ⭐ | ⭐⭐ | ⭐⭐⭐ | 16K | Programming |
+| **qwen2** | ⭐ | ⭐⭐ | ⭐⭐⭐ | 32K | Long contexts |
+
+⭐⭐⭐ Excellent / ممتاز
+⭐⭐ Good / جيد
+⭐ Limited / محدود
+
+## Choosing the Right Model / اختيار النموذج المناسب
+
+### For Arabic Users / للمستخدمين العرب
+1. **supreme-executor** - أفضل خيار للاستخدام العام
+2. **aya** - للمحادثات العربية البحتة
+3. **deepseek-coder** - للبرمجة (مع ملاحظات بالعربية)
+
+### For English Users
+1. **supreme-executor** - Best for general use
+2. **llama3** - For knowledge-intensive tasks
+3. **mistral** - For quick responses
+4. **deepseek-coder** - For programming
+
+### For Specific Tasks / لمهام محددة
+
+**Programming / البرمجة:**
+1. deepseek-coder
+2. supreme-executor
+3. qwen2
+
+**Arabic Content / محتوى عربي:**
+1. aya
+2. supreme-executor
+
+**Speed-Critical / السرعة مهمة:**
+1. mistral
+2. supreme-executor
+
+**Long Documents / مستندات طويلة:**
+1. qwen2
+2. llama3
+
+## Customizing Models / تخصيص النماذج
+
+You can create your own custom models based on these foundation models.
+
+يمكنك إنشاء نماذج مخصصة خاصة بك بناءً على هذه النماذج الأساسية.
+
+### Creating a Custom Modelfile / إنشاء Modelfile مخصص
+
+```modelfile
+# My Custom Model
+FROM llama3
+
+# System prompt
+SYSTEM """
+You are a specialized assistant for [your domain].
+أنت مساعد متخصص في [مجالك].
+"""
+
+# Parameters
+PARAMETER temperature 0.8
+PARAMETER top_p 0.9
+PARAMETER top_k 40
+```
+
+### Building the Model / بناء النموذج
+
+```bash
+ollama create my-custom-model -f Modelfile
+```
+
+### Using Custom Models / استخدام النماذج المخصصة
+
+```bash
+# في سطر الأوامر / Command line
+ollama run my-custom-model "Your prompt"
+
+# في Supreme Agent / In Supreme Agent
+# Update config/settings.json:
+{
+ "agent": {
+ "default_model": "my-custom-model"
+ }
+}
+```
+
+## Model Management / إدارة النماذج
+
+### List Models / قائمة النماذج
+```bash
+ollama list
+```
+
+### Pull a Model / تحميل نموذج
+```bash
+ollama pull llama3
+ollama pull aya
+```
+
+### Remove a Model / حذف نموذج
+```bash
+ollama rm model-name
+```
+
+### Update a Model / تحديث نموذج
+```bash
+ollama pull model-name
+```
+
+## Performance Optimization / تحسين الأداء
+
+### Memory Requirements / متطلبات الذاكرة
+
+| Model | Minimum RAM | Recommended RAM |
+|-------|-------------|-----------------|
+| 7B models | 8 GB | 16 GB |
+| 13B models | 16 GB | 32 GB |
+| 70B models | 64 GB | 128 GB |
+
+### CPU vs GPU / المعالج مقابل بطاقة الرسوميات
+
+- **CPU Only**: Slower but accessible / أبطأ لكن متاح
+- **GPU (NVIDIA)**: 10-100x faster / أسرع 10-100 مرة
+- **Apple Silicon**: Good performance with Metal / أداء جيد مع Metal
+
+### Tips for Better Performance / نصائح لأداء أفضل
+
+1. **Use smaller models** for faster responses
+ استخدم نماذج أصغر للاستجابات الأسرع
+
+2. **Adjust temperature** for different use cases
+ اضبط درجة الحرارة لحالات استخدام مختلفة
+
+3. **Clear cache** periodically
+ امسح الذاكرة المؤقتة بشكل دوري
+
+4. **Monitor resource usage**
+ راقب استخدام الموارد
+
+## Troubleshooting / حل المشاكل
+
+### Model Not Loading / النموذج لا يُحمَّل
+```bash
+# Check if model exists
+ollama list
+
+# Re-pull the model
+ollama pull model-name
+
+# Check system resources
+free -h # Linux
+```
+
+### Slow Response Times / بطء في الاستجابة
+- Use a smaller model
+- Close other applications
+- Consider using GPU
+- Reduce context length
+
+### Out of Memory Errors / أخطاء نفاد الذاكرة
+- Use a smaller model
+- Reduce max_tokens
+- Close other applications
+- Add swap space (Linux)
+
+## Best Practices / أفضل الممارسات
+
+1. **Start with supreme-executor** for general use
+ ابدأ بـ supreme-executor للاستخدام العام
+
+2. **Use specialized models** for specific tasks
+ استخدم نماذج متخصصة لمهام محددة
+
+3. **Monitor performance** and switch models as needed
+ راقب الأداء وبدّل النماذج حسب الحاجة
+
+4. **Keep models updated** for improvements
+ حدّث النماذج للحصول على تحسينات
+
+5. **Experiment** to find what works best
+ جرّب لتجد ما يعمل أفضل
+
+## Resources / المصادر
+
+- **Ollama Library**: https://ollama.ai/library
+- **Model Cards**: Detailed information about each model
+- **Community Forum**: Share experiences and tips
+- **GitHub**: Model implementations and examples
+
+## Frequently Asked Questions / الأسئلة الشائعة
+
+**Q: Can I use multiple models simultaneously?**
+**س: هل يمكنني استخدام نماذج متعددة في نفس الوقت؟**
+
+A: Yes, but they will share system resources. Be mindful of RAM usage.
+ج: نعم، لكنها ستشارك موارد النظام. انتبه لاستخدام الذاكرة.
+
+**Q: Which model is best for Arabic?**
+**س: أي نموذج أفضل للعربية؟**
+
+A: supreme-executor and aya are best for Arabic.
+ج: supreme-executor و aya هما الأفضل للعربية.
+
+**Q: Can I fine-tune these models?**
+**س: هل يمكنني ضبط هذه النماذج بدقة؟**
+
+A: Yes, but it requires advanced knowledge and significant compute resources.
+ج: نعم، لكن يتطلب معرفة متقدمة وموارد حوسبة كبيرة.
+
+---
+
+**Supreme Agent Models Documentation** v1.0.0
+© 2025 wasalstor-web
From 1c2a6d6f12ed26835ad3ab132b848acace09db1d Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:28:27 +0000
Subject: [PATCH 5/7] Add utility scripts (update, backup) and test files for
Supreme Agent
Co-authored-by: wasalstor-web <230709381+wasalstor-web@users.noreply.github.com>
---
.gitignore | 7 ++
scripts/backup.sh | 234 ++++++++++++++++++++++++++++++++++++++++++++
scripts/update.sh | 193 ++++++++++++++++++++++++++++++++++++
tests/test_agent.py | 121 +++++++++++++++++++++++
tests/test_api.py | 160 ++++++++++++++++++++++++++++++
5 files changed, 715 insertions(+)
create mode 100755 scripts/backup.sh
create mode 100755 scripts/update.sh
create mode 100755 tests/test_agent.py
create mode 100755 tests/test_api.py
diff --git a/.gitignore b/.gitignore
index a6b37da..4d822f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,6 +65,13 @@ package-lock.json
logs/
*.log
+# Supreme Agent specific
+supreme_agent.log
+conversation_history.json
+/tmp/supreme-*.log
+/tmp/supreme-*.pid
+supreme-agent-backup-*.tar.gz
+
# OS
Thumbs.db
Desktop.ini
diff --git a/scripts/backup.sh b/scripts/backup.sh
new file mode 100755
index 0000000..0129988
--- /dev/null
+++ b/scripts/backup.sh
@@ -0,0 +1,234 @@
+#!/bin/bash
+
+################################################################################
+# Backup Script - سكريبت النسخ الاحتياطي
+# Supreme Agent - Backup Utility
+#
+# المؤلف / Author: wasalstor-web
+# التاريخ / Date: 2025-10-20
+################################################################################
+
+set -e
+
+# الألوان / Colors
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+echo -e "${BLUE}"
+cat << "EOF"
+╔═══════════════════════════════════════════════════╗
+║ Supreme Agent - Backup / النسخ الاحتياطي ║
+╚═══════════════════════════════════════════════════╝
+EOF
+echo -e "${NC}\n"
+
+# دالة للطباعة / Print function
+print_success() {
+ echo -e "${GREEN}✓ $1${NC}"
+}
+
+print_info() {
+ echo -e "${YELLOW}ℹ $1${NC}"
+}
+
+print_step() {
+ echo -e "${BLUE}➜ $1${NC}"
+}
+
+# الانتقال إلى مجلد المشروع / Navigate to project directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+cd "$PROJECT_DIR"
+
+# إنشاء اسم النسخة الاحتياطية / Create backup name
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+BACKUP_DIR="/tmp/supreme-agent-backup-$TIMESTAMP"
+BACKUP_ARCHIVE="supreme-agent-backup-$TIMESTAMP.tar.gz"
+
+print_info "مجلد المشروع / Project directory: $PROJECT_DIR"
+print_info "مجلد النسخة الاحتياطية / Backup directory: $BACKUP_DIR"
+echo
+
+# إنشاء مجلد النسخة الاحتياطية / Create backup directory
+print_step "إنشاء مجلد النسخة الاحتياطية / Creating backup directory..."
+mkdir -p "$BACKUP_DIR"
+print_success "تم إنشاء المجلد / Directory created"
+echo
+
+# 1. نسخ الإعدادات / Backup configurations
+print_step "[1/6] نسخ الإعدادات / Backing up configurations..."
+mkdir -p "$BACKUP_DIR/config"
+
+# نسخ ملفات الإعدادات / Copy configuration files
+if [ -f "config/settings.json" ]; then
+ cp config/settings.json "$BACKUP_DIR/config/"
+ print_success "settings.json"
+fi
+
+if [ -f ".env" ]; then
+ cp .env "$BACKUP_DIR/config/"
+ print_success ".env"
+fi
+
+if [ -f ".env.example" ]; then
+ cp .env.example "$BACKUP_DIR/config/"
+ print_success ".env.example"
+fi
+
+echo
+
+# 2. نسخ النماذج المخصصة / Backup custom models
+print_step "[2/6] نسخ النماذج المخصصة / Backing up custom models..."
+mkdir -p "$BACKUP_DIR/models"
+
+if [ -d "models" ]; then
+ cp -r models/* "$BACKUP_DIR/models/" 2>/dev/null || true
+ print_success "تم نسخ النماذج / Models backed up"
+else
+ print_info "لا توجد نماذج مخصصة / No custom models"
+fi
+
+echo
+
+# 3. نسخ السجلات / Backup logs
+print_step "[3/6] نسخ السجلات / Backing up logs..."
+mkdir -p "$BACKUP_DIR/logs"
+
+# نسخ سجلات Supreme Agent / Copy Supreme Agent logs
+if [ -f "supreme_agent.log" ]; then
+ cp supreme_agent.log "$BACKUP_DIR/logs/"
+ print_success "supreme_agent.log"
+fi
+
+# نسخ سجلات API / Copy API logs
+if [ -f "/tmp/supreme-api.log" ]; then
+ cp /tmp/supreme-api.log "$BACKUP_DIR/logs/"
+ print_success "API logs"
+fi
+
+# نسخ سجلات Web / Copy Web logs
+if [ -f "/tmp/supreme-web.log" ]; then
+ cp /tmp/supreme-web.log "$BACKUP_DIR/logs/"
+ print_success "Web logs"
+fi
+
+echo
+
+# 4. نسخ البيانات / Backup data
+print_step "[4/6] نسخ البيانات / Backing up data..."
+mkdir -p "$BACKUP_DIR/data"
+
+# نسخ سجل المحادثات / Copy conversation history
+if [ -f "conversation_history.json" ]; then
+ cp conversation_history.json "$BACKUP_DIR/data/"
+ print_success "conversation_history.json"
+fi
+
+echo
+
+# 5. نسخ معلومات النماذج من Ollama / Backup Ollama model info
+print_step "[5/6] نسخ معلومات النماذج / Backing up model information..."
+
+if command -v ollama &> /dev/null; then
+ # حفظ قائمة النماذج / Save model list
+ ollama list > "$BACKUP_DIR/ollama_models.txt" 2>/dev/null || true
+ print_success "قائمة النماذج / Model list saved"
+
+ # حفظ معلومات supreme-executor / Save supreme-executor info
+ ollama show supreme-executor > "$BACKUP_DIR/supreme-executor-info.txt" 2>/dev/null || true
+else
+ print_info "Ollama غير متوفر / Ollama not available"
+fi
+
+echo
+
+# 6. إنشاء معلومات النسخة الاحتياطية / Create backup info
+print_step "[6/6] إنشاء معلومات النسخة الاحتياطية / Creating backup information..."
+
+cat > "$BACKUP_DIR/backup_info.txt" << EOF
+Supreme Agent Backup Information
+النسخة الاحتياطية للوكيل الأعلى
+
+التاريخ / Date: $(date)
+الوقت / Time: $(date +%H:%M:%S)
+المجلد / Directory: $PROJECT_DIR
+المستخدم / User: $(whoami)
+النظام / System: $(uname -s)
+الإصدار / Version: 1.0.0
+
+الملفات المحفوظة / Backed up files:
+- config/settings.json
+- .env
+- models/Modelfile
+- logs/
+- conversation_history.json
+- ollama models list
+
+الاستعادة / Restore:
+1. Extract archive: tar -xzf $BACKUP_ARCHIVE
+2. Copy files back to project
+3. Restart services
+EOF
+
+print_success "تم إنشاء معلومات النسخة / Backup info created"
+echo
+
+# ضغط النسخة الاحتياطية / Compress backup
+print_step "ضغط النسخة الاحتياطية / Compressing backup..."
+
+cd /tmp
+tar -czf "$BACKUP_ARCHIVE" "$(basename "$BACKUP_DIR")"
+
+BACKUP_SIZE=$(du -h "$BACKUP_ARCHIVE" | cut -f1)
+print_success "تم الضغط / Compressed: $BACKUP_SIZE"
+echo
+
+# حذف المجلد المؤقت / Remove temporary directory
+rm -rf "$BACKUP_DIR"
+
+echo -e "${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+echo -e "${GREEN} النسخ الاحتياطي مكتمل! / Backup Complete! ${NC}"
+echo -e "${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"
+
+cat << EOF
+${YELLOW}معلومات النسخة الاحتياطية / Backup Information:${NC}
+
+ ${BLUE}الملف / File:${NC}
+ /tmp/$BACKUP_ARCHIVE
+
+ ${BLUE}الحجم / Size:${NC}
+ $BACKUP_SIZE
+
+ ${BLUE}المحتويات / Contents:${NC}
+ - Configuration files / ملفات الإعدادات
+ - Custom models / النماذج المخصصة
+ - Logs / السجلات
+ - Data / البيانات
+ - Model information / معلومات النماذج
+
+${YELLOW}الاستعادة / Restore:${NC}
+
+ ${BLUE}# استخراج الملفات / Extract files${NC}
+ tar -xzf /tmp/$BACKUP_ARCHIVE -C /tmp
+
+ ${BLUE}# نسخ الملفات / Copy files${NC}
+ cp -r /tmp/supreme-agent-backup-$TIMESTAMP/config/* $PROJECT_DIR/config/
+ cp -r /tmp/supreme-agent-backup-$TIMESTAMP/models/* $PROJECT_DIR/models/
+ cp /tmp/supreme-agent-backup-$TIMESTAMP/data/* $PROJECT_DIR/
+
+ ${BLUE}# إعادة تشغيل الخدمات / Restart services${NC}
+ ./scripts/quick-start.sh
+
+${YELLOW}نقل النسخة الاحتياطية / Move Backup:${NC}
+
+ ${BLUE}# إلى مجلد المشروع / To project directory${NC}
+ mv /tmp/$BACKUP_ARCHIVE $PROJECT_DIR/
+
+ ${BLUE}# إلى مكان آمن / To safe location${NC}
+ mv /tmp/$BACKUP_ARCHIVE ~/backups/
+
+${GREEN}تم حفظ النسخة الاحتياطية بنجاح! 💾${NC}
+${GREEN}Backup saved successfully! 💾${NC}
+EOF
diff --git a/scripts/update.sh b/scripts/update.sh
new file mode 100755
index 0000000..feb5016
--- /dev/null
+++ b/scripts/update.sh
@@ -0,0 +1,193 @@
#!/bin/bash

################################################################################
# Update Script - سكريبت التحديث
# Supreme Agent - System Update Utility
#
# المؤلف / Author: wasalstor-web
# التاريخ / Date: 2025-10-20
################################################################################

# Abort on the first failing command; the interactive steps below rely on this.
set -e

# الألوان / Colors — ANSI escape sequences for terminal output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Banner: heredoc delimiter is quoted ("EOF") so the box art is printed verbatim.
echo -e "${BLUE}"
cat << "EOF"
╔═══════════════════════════════════════════════════╗
║     Supreme Agent - System Update / التحديث      ║
╚═══════════════════════════════════════════════════╝
EOF
echo -e "${NC}\n"

# دالة للطباعة / Print helpers — colored one-line status messages.
print_success() {
    echo -e "${GREEN}✓ $1${NC}"
}

print_error() {
    echo -e "${RED}✗ $1${NC}"
}

print_info() {
    echo -e "${YELLOW}ℹ $1${NC}"
}

print_step() {
    echo -e "${BLUE}➜ $1${NC}"
}
+
# الانتقال إلى مجلد المشروع / Navigate to project directory
# Resolve the script's own directory so the updater works from any CWD;
# the project root is assumed to be one level above scripts/.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_DIR"

print_info "مجلد المشروع / Project directory: $PROJECT_DIR"
echo

# 1. تحديث المشروع من Git / Update project from Git
print_step "[1/5] تحديث المشروع من Git / Updating project from Git..."
if [ -d ".git" ]; then
    git fetch origin
    CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
    print_info "الفرع الحالي / Current branch: $CURRENT_BRANCH"

    # read -n 1 takes a single keystroke; -r keeps backslashes literal.
    read -p "هل تريد سحب التحديثات؟ / Pull updates? (y/n): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        git pull origin "$CURRENT_BRANCH"
        print_success "تم تحديث المشروع / Project updated"
    else
        print_info "تم تخطي التحديث / Update skipped"
    fi
else
    print_info "ليس مشروع git / Not a git repository"
fi
echo

# 2. تحديث نماذج Ollama / Update Ollama models
print_step "[2/5] تحديث نماذج Ollama / Updating Ollama models..."
if command -v ollama &> /dev/null; then
    # قائمة النماذج المثبتة / List installed models
    print_info "النماذج المثبتة / Installed models:"
    ollama list

    read -p "هل تريد تحديث النماذج؟ / Update models? (y/n): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        # تحديث كل نموذج / Update each model
        # tail -n +2 skips the `ollama list` header row; awk takes the NAME column.
        # `|| print_error` keeps the loop going under `set -e` when one pull fails.
        for model in $(ollama list | tail -n +2 | awk '{print $1}'); do
            print_info "تحديث / Updating: $model"
            ollama pull "$model" || print_error "فشل تحديث / Failed to update: $model"
        done
        print_success "تم تحديث النماذج / Models updated"
    else
        print_info "تم تخطي تحديث النماذج / Model update skipped"
    fi
else
    print_error "Ollama غير مثبت / Ollama not installed"
fi
echo

# 3. تحديث مكتبات Python / Update Python dependencies
print_step "[3/5] تحديث مكتبات Python / Updating Python dependencies..."
if command -v pip3 &> /dev/null; then
    print_info "تحديث pip / Updating pip..."
    pip3 install --upgrade pip

    # NOTE(review): upgrades into the active (possibly system) environment;
    # confirm a virtualenv is expected here.
    print_info "تحديث المكتبات / Updating libraries..."
    pip3 install --upgrade requests flask flask-cors

    print_success "تم تحديث المكتبات / Libraries updated"
else
    print_error "pip3 غير مثبت / pip3 not installed"
fi
echo

# 4. إعادة إنشاء النموذج المخصص / Recreate custom model
print_step "[4/5] إعادة إنشاء النموذج المخصص / Recreating custom model..."
if [ -f "models/Modelfile" ]; then
    read -p "هل تريد إعادة إنشاء supreme-executor؟ / Recreate supreme-executor? (y/n): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        # حذف النموذج القديم / Remove old model
        # `|| true` so a missing model does not abort under `set -e`.
        ollama rm supreme-executor 2>/dev/null || true

        # إنشاء النموذج الجديد / Create new model
        ollama create supreme-executor -f models/Modelfile
        print_success "تم إعادة إنشاء النموذج / Model recreated"
    else
        print_info "تم تخطي إعادة الإنشاء / Recreation skipped"
    fi
else
    print_error "ملف Modelfile غير موجود / Modelfile not found"
fi
echo

# 5. إعادة تشغيل الخدمات / Restart services
print_step "[5/5] إعادة تشغيل الخدمات / Restarting services..."

# إيقاف الخدمات الحالية / Stop current services
# NOTE(review): services are stopped BEFORE the restart prompt below —
# answering "n" leaves them down. Confirm this is intended.
print_info "إيقاف الخدمات / Stopping services..."
if [ -f "/tmp/supreme-api.pid" ]; then
    kill $(cat /tmp/supreme-api.pid 2>/dev/null) 2>/dev/null || true
    rm -f /tmp/supreme-api.pid
fi

if [ -f "/tmp/supreme-web.pid" ]; then
    kill $(cat /tmp/supreme-web.pid 2>/dev/null) 2>/dev/null || true
    rm -f /tmp/supreme-web.pid
fi

read -p "هل تريد إعادة تشغيل الخدمات؟ / Restart services? (y/n): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    # تشغيل API Server / Start API Server
    # Detach with nohup; logs and PID files live under /tmp.
    print_info "تشغيل API Server..."
    nohup python3 api/server.py > /tmp/supreme-api.log 2>&1 &
    echo $! > /tmp/supreme-api.pid

    # تشغيل Web Interface / Start Web Interface
    # NOTE(review): under `set -e` this `cd web` aborts the script if the
    # web/ directory is missing — confirm it always exists at this point.
    print_info "تشغيل Web Interface..."
    cd web
    nohup python3 -m http.server 8080 > /tmp/supreme-web.log 2>&1 &
    echo $! > /tmp/supreme-web.pid
    cd ..

    sleep 3
    print_success "تم إعادة تشغيل الخدمات / Services restarted"
else
    print_info "تم تخطي إعادة التشغيل / Restart skipped"
fi

echo -e "\n${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${GREEN}       التحديث مكتمل! / Update Complete!        ${NC}"
echo -e "${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"

# Summary: unquoted heredoc so color variables and the $(cat ...) PID
# lookups expand at print time.
cat << EOF
${YELLOW}معلومات الخدمات / Service Information:${NC}

  ${BLUE}API Server:${NC}
  http://localhost:5000
  Log: /tmp/supreme-api.log
  PID: $(cat /tmp/supreme-api.pid 2>/dev/null || echo "Not running")

  ${BLUE}Web Interface:${NC}
  http://localhost:8080
  Log: /tmp/supreme-web.log
  PID: $(cat /tmp/supreme-web.pid 2>/dev/null || echo "Not running")

${YELLOW}اختبار النظام / Test System:${NC}

  ${BLUE}supreme-agent health${NC}
  ${BLUE}supreme-agent models${NC}
  ${BLUE}curl http://localhost:5000/api/health${NC}

${GREEN}جميع التحديثات مكتملة! 🚀${NC}
${GREEN}All updates complete! 🚀${NC}
EOF
diff --git a/tests/test_agent.py b/tests/test_agent.py
new file mode 100755
index 0000000..c64ee52
--- /dev/null
+++ b/tests/test_agent.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""
+Supreme Agent Tests
+Tests for the Supreme Agent core functionality
+
+المؤلف / Author: wasalstor-web
+التاريخ / Date: 2025-10-20
+"""
+
+import sys
+import os
+from pathlib import Path
+
+# Add parent directory to path
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from scripts.supreme_agent import SupremeAgent
+
def test_agent_initialization():
    """Verify a freshly constructed agent carries the expected defaults."""
    print("Testing agent initialization...")
    agent = SupremeAgent()
    expected_defaults = {
        "model": "supreme-executor",
        "ollama_host": "http://localhost:11434",
        "temperature": 0.7,
    }
    for attr, value in expected_defaults.items():
        assert getattr(agent, attr) == value
    print("✓ Agent initialization test passed")
+
def test_health_check():
    """health_check() must report both a status and a timestamp."""
    print("Testing health check...")
    health = SupremeAgent().health_check()
    for required_key in ("status", "timestamp"):
        assert required_key in health
    print(f"✓ Health check test passed - Status: {health['status']}")
+
def test_get_models():
    """get_models() must always return a list."""
    print("Testing get models...")
    models = SupremeAgent().get_models()
    assert isinstance(models, list)
    print(f"✓ Get models test passed - Found {len(models)} models")
+
def test_conversation_history():
    """The in-memory conversation log starts empty and accepts entries."""
    print("Testing conversation history...")
    agent = SupremeAgent()

    # A new agent has no history yet.
    assert not agent.conversation_history

    entry = {
        "type": "test",
        "message": "test message",
        "timestamp": "2025-10-20T00:00:00",
    }
    agent.conversation_history.append(entry)

    assert len(agent.conversation_history) == 1
    print("✓ Conversation history test passed")
+
def test_save_load_history():
    """Round-trip the conversation history through save_history()/load_history().

    Fixes two defects in the original:
    - the hard-coded ``/tmp/test_history.json`` path is not portable
      (no /tmp on Windows); use ``tempfile`` instead;
    - ``os.remove`` ran after the assertion, so a failing test leaked the
      file; cleanup now happens in a ``finally`` block.
    """
    import tempfile

    print("Testing save and load history...")
    agent = SupremeAgent()

    # Seed the agent with known entries to round-trip.
    agent.conversation_history = [
        {"type": "test", "message": "test1"},
        {"type": "test", "message": "test2"}
    ]

    # NamedTemporaryFile yields a unique, platform-appropriate path;
    # delete=False so the agent can reopen the file by name afterwards.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
        test_file = f.name

    try:
        agent.save_history(test_file)

        # A brand-new agent must load back exactly what was saved.
        agent2 = SupremeAgent()
        agent2.load_history(test_file)
        assert len(agent2.conversation_history) == 2
        print("✓ Save/load history test passed")
    finally:
        # Always remove the temp file, even when an assertion fails.
        if os.path.exists(test_file):
            os.remove(test_file)
+
def run_all_tests():
    """Execute every agent test in sequence and print a pass/fail summary.

    Returns True when all tests passed, False otherwise.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("Supreme Agent Tests / اختبارات الوكيل الأعلى")
    print(banner + "\n")

    suite = (
        test_agent_initialization,
        test_conversation_history,
        test_save_load_history,
        test_get_models,
        test_health_check,
    )

    passed = failed = 0
    for case in suite:
        try:
            case()
        except Exception as e:
            print(f"✗ {case.__name__} failed: {e}")
            failed += 1
        else:
            passed += 1

    print("\n" + banner)
    print(f"Results / النتائج: {passed} passed, {failed} failed")
    print(banner + "\n")

    return failed == 0
+
# Script entry point: run the suite and exit 0 on success, 1 on any failure.
if __name__ == "__main__":
    success = run_all_tests()
    sys.exit(0 if success else 1)
diff --git a/tests/test_api.py b/tests/test_api.py
new file mode 100755
index 0000000..4cc99de
--- /dev/null
+++ b/tests/test_api.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+"""
+Supreme Agent API Tests
+Tests for the API Server
+
+المؤلف / Author: wasalstor-web
+التاريخ / Date: 2025-10-20
+"""
+
+import requests
+import json
+import time
+
+# API Base URL
+API_URL = "http://localhost:5000"
+
def wait_for_api(timeout=30):
    """Poll /api/health once per second until it answers 200 or *timeout* expires.

    Returns True as soon as the API responds, False on timeout.
    """
    print("Waiting for API to be available...")
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            probe = requests.get(f"{API_URL}/api/health", timeout=2)
            if probe.status_code == 200:
                print("✓ API is available")
                return True
        except requests.exceptions.RequestException:
            pass  # server not up yet; retry after a short pause
        time.sleep(1)
    print("✗ API not available after timeout")
    return False
+
def test_health_endpoint():
    """GET /api/health must return 200 with status and timestamp fields."""
    print("Testing /api/health...")
    response = requests.get(f"{API_URL}/api/health")
    assert response.status_code == 200
    body = response.json()
    for field in ("status", "timestamp"):
        assert field in body
    print(f"✓ Health endpoint test passed - Status: {body['status']}")
+
def test_models_endpoint():
    """GET /api/models must list available models and name the current one."""
    print("Testing /api/models...")
    response = requests.get(f"{API_URL}/api/models")
    assert response.status_code == 200
    body = response.json()
    for field in ("models", "current_model"):
        assert field in body
    print(f"✓ Models endpoint test passed - {len(body['models'])} models")
+
def test_chat_endpoint():
    """POST /api/chat with a message must succeed and echo a response field."""
    print("Testing /api/chat...")
    reply = requests.post(
        f"{API_URL}/api/chat",
        json={"message": "Hello, this is a test message"},
    )
    assert reply.status_code == 200
    body = reply.json()
    for field in ("success", "response"):
        assert field in body
    print("✓ Chat endpoint test passed")
+
def test_execute_endpoint():
    """POST /api/execute with a command must succeed and carry a response."""
    print("Testing /api/execute...")
    reply = requests.post(
        f"{API_URL}/api/execute",
        json={"command": "Return a simple greeting"},
    )
    assert reply.status_code == 200
    body = reply.json()
    for field in ("success", "response"):
        assert field in body
    print("✓ Execute endpoint test passed")
+
def test_generate_code_endpoint():
    """POST /api/generate-code must return success, code, and language fields."""
    print("Testing /api/generate-code...")
    request_body = {
        "description": "A simple hello world function",
        "language": "python",
    }
    reply = requests.post(f"{API_URL}/api/generate-code", json=request_body)
    assert reply.status_code == 200
    body = reply.json()
    for field in ("success", "code", "language"):
        assert field in body
    print("✓ Generate code endpoint test passed")
+
def test_error_handling():
    """Posting an empty payload to chat/execute must yield HTTP 400."""
    print("Testing error handling...")

    # Both endpoints require a field in the JSON body; an empty object
    # must be rejected as a bad request.
    for endpoint in ("/api/chat", "/api/execute"):
        response = requests.post(f"{API_URL}{endpoint}", json={})
        assert response.status_code == 400

    print("✓ Error handling test passed")
+
def test_cors_headers():
    """Verify the API actually emits CORS headers.

    The original assertion (``header in headers or status_code == 200``)
    was a tautology: any successful response passed regardless of CORS
    configuration, so the test could never fail. Send an explicit Origin
    header (flask-cors only needs to reply with Access-Control-Allow-Origin
    on cross-origin requests) and assert the header itself.
    """
    print("Testing CORS headers...")
    response = requests.get(
        f"{API_URL}/api/health",
        headers={"Origin": "http://example.com"},
    )
    assert response.status_code == 200
    assert "Access-Control-Allow-Origin" in response.headers
    print("✓ CORS headers test passed")
+
def run_all_tests():
    """Run every API test against a live server; return True if all pass."""
    banner = "=" * 60
    print("\n" + banner)
    print("Supreme Agent API Tests / اختبارات API للوكيل الأعلى")
    print(banner + "\n")

    # Bail out early when no server is listening.
    if not wait_for_api():
        print("\n✗ API is not running. Please start it first:")
        print("  python3 api/server.py")
        return False

    suite = (
        test_health_endpoint,
        test_models_endpoint,
        test_error_handling,
        test_cors_headers,
        test_chat_endpoint,
        test_execute_endpoint,
        test_generate_code_endpoint,
    )

    passed = failed = 0
    for case in suite:
        try:
            case()
        except AssertionError as e:
            print(f"✗ {case.__name__} failed: {e}")
            failed += 1
        except Exception as e:
            print(f"✗ {case.__name__} error: {e}")
            failed += 1
        else:
            passed += 1

    print("\n" + banner)
    print(f"Results / النتائج: {passed} passed, {failed} failed")
    print(banner + "\n")

    return failed == 0
+
# Script entry point: exit code mirrors the overall test result
# (0 when all tests pass, 1 otherwise).
if __name__ == "__main__":
    import sys
    success = run_all_tests()
    sys.exit(0 if success else 1)
From 4a37518b4984304e3eec1caa8fc63b5b8ccf214b Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:30:02 +0000
Subject: [PATCH 6/7] Fix code review issues: improve error handling and
cross-platform compatibility
Co-authored-by: wasalstor-web <230709381+wasalstor-web@users.noreply.github.com>
---
scripts/backup.sh | 4 ++--
scripts/update.sh | 23 +++++++++++++++++------
tests/test_agent.py | 29 +++++++++++++++++------------
3 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/scripts/backup.sh b/scripts/backup.sh
index 0129988..fb0012f 100755
--- a/scripts/backup.sh
+++ b/scripts/backup.sh
@@ -83,8 +83,8 @@ echo
print_step "[2/6] نسخ النماذج المخصصة / Backing up custom models..."
mkdir -p "$BACKUP_DIR/models"
-if [ -d "models" ]; then
- cp -r models/* "$BACKUP_DIR/models/" 2>/dev/null || true
+if [ -d "models" ] && [ "$(ls -A models 2>/dev/null)" ]; then
+ cp -r models/* "$BACKUP_DIR/models/"
print_success "تم نسخ النماذج / Models backed up"
else
print_info "لا توجد نماذج مخصصة / No custom models"
diff --git a/scripts/update.sh b/scripts/update.sh
index feb5016..70eec6f 100755
--- a/scripts/update.sh
+++ b/scripts/update.sh
@@ -80,12 +80,23 @@ if command -v ollama &> /dev/null; then
read -p "هل تريد تحديث النماذج؟ / Update models? (y/n): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
- # تحديث كل نموذج / Update each model
- for model in $(ollama list | tail -n +2 | awk '{print $1}'); do
- print_info "تحديث / Updating: $model"
- ollama pull "$model" || print_error "فشل تحديث / Failed to update: $model"
- done
- print_success "تم تحديث النماذج / Models updated"
+ # Get list of installed models, skipping header
+ models=$(ollama list 2>/dev/null | tail -n +2 | awk '{print $1}')
+
+ if [ -z "$models" ]; then
+ print_info "لا توجد نماذج مثبتة / No models installed"
+ else
+ # Update each model with error handling
+ while IFS= read -r model; do
+ if [ -n "$model" ]; then
+ print_info "تحديث / Updating: $model"
+ if ! ollama pull "$model" 2>&1; then
+ print_error "فشل تحديث / Failed to update: $model"
+ fi
+ fi
+ done <<< "$models"
+ print_success "تم تحديث النماذج / Models updated"
+ fi
else
print_info "تم تخطي تحديث النماذج / Model update skipped"
fi
diff --git a/tests/test_agent.py b/tests/test_agent.py
index c64ee52..f0ce708 100755
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -71,19 +71,24 @@ def test_save_load_history():
{"type": "test", "message": "test2"}
]
- # Save
- test_file = "/tmp/test_history.json"
- agent.save_history(test_file)
+ # Save to temporary file
+ import tempfile
+ with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
+ test_file = f.name
- # Load in new agent
- agent2 = SupremeAgent()
- agent2.load_history(test_file)
-
- assert len(agent2.conversation_history) == 2
- print("✓ Save/load history test passed")
-
- # Cleanup
- os.remove(test_file)
+ try:
+ agent.save_history(test_file)
+
+ # Load in new agent
+ agent2 = SupremeAgent()
+ agent2.load_history(test_file)
+
+ assert len(agent2.conversation_history) == 2
+ print("✓ Save/load history test passed")
+ finally:
+ # Cleanup
+ if os.path.exists(test_file):
+ os.remove(test_file)
def run_all_tests():
"""Run all tests / تشغيل جميع الاختبارات"""
From 5c66ee6b80dfa61c85ed553e05edc8632fd35c92 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 20 Oct 2025 07:31:34 +0000
Subject: [PATCH 7/7] Add SUPREME_AGENT.md - comprehensive summary and quick
reference guide
Co-authored-by: wasalstor-web <230709381+wasalstor-web@users.noreply.github.com>
---
SUPREME_AGENT.md | 221 ++++++++++++++++++++++++++++++++++++++++++++++
scripts/backup.sh | 15 +++-
2 files changed, 232 insertions(+), 4 deletions(-)
create mode 100644 SUPREME_AGENT.md
diff --git a/SUPREME_AGENT.md b/SUPREME_AGENT.md
new file mode 100644
index 0000000..e5c622f
--- /dev/null
+++ b/SUPREME_AGENT.md
@@ -0,0 +1,221 @@
+# Supreme Agent - الوكيل الأعلى المتكامل
+
+## 🎯 Overview / نظرة عامة
+
+Supreme Agent is a comprehensive, bilingual (Arabic/English) AI agent platform that combines multiple AI capabilities into a single, easy-to-use system.
+
+الوكيل الأعلى هو منصة شاملة ثنائية اللغة (عربي/إنجليزي) للذكاء الاصطناعي تجمع قدرات متعددة في نظام واحد سهل الاستخدام.
+
+## ✨ Features / المميزات
+
+### 🤖 AI Capabilities
+- **Intelligent Chat**: Natural conversations in Arabic and English
+- **Command Execution**: Execute any task or command
+- **File Analysis**: Comprehensive code and file analysis
+- **Code Generation**: Generate code in 20+ programming languages
+- **Multiple Models**: Support for 6 AI models
+
+### 💻 Technical Features
+- **REST API**: 6 endpoints with full CRUD operations
+- **Modern Web UI**: Responsive interface with dark/light themes
+- **Docker Support**: Full containerization
+- **OpenWebUI Integration**: Seamless integration
+- **One-Command Install**: Quick and easy setup
+- **Cross-Platform**: Works on Linux, macOS, Windows (WSL)
+
+## 🚀 Quick Start
+
+### Installation
+
+```bash
+# One-command installation
+./scripts/quick-start.sh
+```
+
+### Usage
+
+**Command Line:**
+```bash
+# Chat
+supreme-agent chat "مرحباً! كيف حالك؟"
+
+# Execute command
+supreme-agent execute "Create a Python web scraper"
+
+# Analyze file
+supreme-agent analyze-file script.py
+
+# Generate code
+supreme-agent generate-code "REST API for users" --lang python
+
+# Check health
+supreme-agent health
+
+# List models
+supreme-agent models
+```
+
+**API:**
+```bash
+curl -X POST http://localhost:5000/api/chat \
+ -H "Content-Type: application/json" \
+ -d '{"message": "Hello!"}'
+```
+
+**Web Interface:**
+```bash
+# Open in browser
+open http://localhost:8080
+```
+
+## 📁 Project Structure
+
+```
+AI-Agent-Platform/
+├── models/
+│ └── Modelfile # Custom supreme-executor model
+├── scripts/
+│ ├── supreme_agent.py # Core agent class
+│ ├── install-supreme-agent.sh # Installation script
+│ ├── quick-start.sh # One-command setup
+│ ├── integrate-openwebui.sh # OpenWebUI integration
+│ ├── update.sh # Update utility
+│ └── backup.sh # Backup utility
+├── api/
+│ └── server.py # REST API server
+├── web/
+│ ├── index.html # Web interface
+│ ├── style.css # Responsive styles
+│ └── app.js # Frontend logic
+├── config/
+│ └── settings.json # Configuration
+├── docs/
+│ ├── API.md # API documentation
+│ └── MODELS.md # Models guide
+├── tests/
+│ ├── test_agent.py # Agent tests
+│ └── test_api.py # API tests
+├── Dockerfile # Docker container
+├── docker-compose.yml # Service orchestration
+└── README.md # Main documentation
+```
+
+## 🧠 Supported AI Models
+
+1. **supreme-executor** ⭐ - Custom bilingual model (recommended)
+2. **llama3** - General-purpose foundation model
+3. **aya** - Multilingual specialist (excellent Arabic)
+4. **mistral** - Fast and efficient
+5. **deepseek-coder** - Programming specialist
+6. **qwen2** - Advanced long-context model
+
+See [docs/MODELS.md](docs/MODELS.md) for detailed comparison.
+
+## 🌐 API Endpoints
+
+- `GET /api/health` - System health check
+- `POST /api/chat` - Intelligent conversation
+- `POST /api/execute` - Command execution
+- `POST /api/analyze` - File analysis
+- `POST /api/generate-code` - Code generation
+- `GET /api/models` - List available models
+
+See [docs/API.md](docs/API.md) for complete documentation.
+
+## 🐳 Docker Deployment
+
+```bash
+# Build and run
+docker-compose up -d
+
+# With OpenWebUI
+docker-compose --profile with-openwebui up -d
+
+# Access:
+# - API: http://localhost:5000
+# - Web: http://localhost:8080
+# - OpenWebUI: http://localhost:3000
+```
+
+## 🛠️ Maintenance
+
+### Update System
+```bash
+./scripts/update.sh
+```
+
+### Backup Data
+```bash
+./scripts/backup.sh
+```
+
+### Check Status
+```bash
+supreme-agent health
+curl http://localhost:5000/api/health
+```
+
+## 📚 Documentation
+
+- **[README.md](README.md)** - Main documentation
+- **[docs/API.md](docs/API.md)** - API reference
+- **[docs/MODELS.md](docs/MODELS.md)** - Models guide
+- **[OPENWEBUI.md](OPENWEBUI.md)** - OpenWebUI integration
+
+## 🧪 Testing
+
+```bash
+# Test agent
+python3 tests/test_agent.py
+
+# Test API (requires running server)
+python3 api/server.py &
+python3 tests/test_api.py
+```
+
+## 🔧 Configuration
+
+Edit `config/settings.json` to customize:
+- Default model
+- API settings
+- Web interface preferences
+- Ollama configuration
+
+## 📝 Examples
+
+### Python Client
+```python
+from scripts.supreme_agent import SupremeAgent
+
+agent = SupremeAgent()
+response = agent.chat("مرحباً")
+print(response)
+```
+
+### API Call
+```python
+import requests
+
+response = requests.post('http://localhost:5000/api/chat',
+ json={'message': 'Hello!'})
+print(response.json()['response'])
+```
+
+## 🤝 Contributing
+
+This project is part of the AI-Agent-Platform. Contributions are welcome!
+
+## 📄 License
+
+© 2025 wasalstor-web
+
+## 🆘 Support
+
+For issues or questions:
+- GitHub Issues: https://github.com/wasalstor-web/AI-Agent-Platform/issues
+- Documentation: See docs/
+
+---
+
+**Supreme Agent** - الوكيل الأعلى المتكامل
+Built with ❤️ for the AI community
diff --git a/scripts/backup.sh b/scripts/backup.sh
index fb0012f..b01796e 100755
--- a/scripts/backup.sh
+++ b/scripts/backup.sh
@@ -133,11 +133,18 @@ print_step "[5/6] نسخ معلومات النماذج / Backing up model inform
if command -v ollama &> /dev/null; then
# حفظ قائمة النماذج / Save model list
- ollama list > "$BACKUP_DIR/ollama_models.txt" 2>/dev/null || true
- print_success "قائمة النماذج / Model list saved"
+ if ollama list > "$BACKUP_DIR/ollama_models.txt" 2>&1; then
+ print_success "قائمة النماذج / Model list saved"
+ else
+ print_info "فشل حفظ قائمة النماذج / Failed to save model list"
+ fi
- # حفظ معلومات supreme-executor / Save supreme-executor info
- ollama show supreme-executor > "$BACKUP_DIR/supreme-executor-info.txt" 2>/dev/null || true
+ # حفظ معلومات supreme-executor إذا كان موجوداً / Save supreme-executor info if it exists
+ if ollama list 2>/dev/null | grep -q "supreme-executor"; then
+ if ollama show supreme-executor > "$BACKUP_DIR/supreme-executor-info.txt" 2>&1; then
+ print_success "معلومات supreme-executor / supreme-executor info saved"
+ fi
+ fi
else
print_info "Ollama غير متوفر / Ollama not available"
fi