From be68ce08f18e8cd92783e31b7f279a27bb427ec1 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi Date: Tue, 16 Dec 2025 00:30:17 +0100 Subject: [PATCH 1/4] adaptive learning --- back_tester/README.md | 165 ++++- back_tester/__init__.py | 51 +- back_tester/adaptive_learning.py | 845 ++++++++++++++++++++++++ back_tester/enhanced_metrics.py | 669 +++++++++++++++++++ back_tester/intensive_training.py | 660 ++++++++++++++++++ back_tester/run_learning_backtest.py | 503 ++++++++++++++ back_tester/start_intensive_training.sh | 278 ++++++++ back_tester/start_learning_backtest.sh | 283 ++++++++ back_tester/strategy.py | 154 ++++- back_tester/trainer.py | 80 ++- 10 files changed, 3659 insertions(+), 29 deletions(-) create mode 100644 back_tester/adaptive_learning.py create mode 100644 back_tester/enhanced_metrics.py create mode 100755 back_tester/intensive_training.py create mode 100755 back_tester/run_learning_backtest.py create mode 100755 back_tester/start_intensive_training.sh create mode 100755 back_tester/start_learning_backtest.sh diff --git a/back_tester/README.md b/back_tester/README.md index 58a4d48..359f436 100644 --- a/back_tester/README.md +++ b/back_tester/README.md @@ -257,13 +257,168 @@ cd reports/examples 9. **Track drawdowns carefully** - Maximum drawdown is often more important than returns 10. **Test over multiple time periods** - Market conditions change over time +### 10. 
Adaptive Learning System (`adaptive_learning.py`) ⭐ NEW + +Self-learning capabilities that automatically adjust signal weights based on performance: +- **Signal Performance Tracking** - Track each signal with its contributing indicators +- **Adaptive Weight Adjustment** - Automatically adjust weights based on what works +- **Indicator Attribution** - Identify which indicators contribute to wins/losses +- **Learning Analytics** - Detailed reports on indicator and regime performance + +```python +from back_tester.adaptive_learning import SelfLearningBacktester + +# Create self-learning backtester +learner = SelfLearningBacktester( + storage_path="./data/learning", + auto_adjust_weights=True, + adjustment_frequency=50 # Adjust weights every 50 signals +) + +# Get adaptive weights (these improve over time) +weights = learner.get_current_weights() + +# Run backtest with learning enabled +final_balance, trades, _ = backtest_strategy( + symbol="BTCUSDT", + interval="1h", + candles=500, + enable_learning=True, + learner=learner, +) + +# View performance analytics +learner.print_status() + +# Get detailed indicator performance +indicator_perf = learner.tracker.get_indicator_performance() +print(indicator_perf) + +# Identify what works +strong = learner.tracker.identify_strong_indicators() +weak = learner.tracker.identify_weak_indicators() +print(f"Strong indicators: {strong}") +print(f"Weak indicators: {weak}") +``` + + +### Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ BACKTESTING FLOW │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌───────────────┐ ┌─────────────────┐ │ +│ │ Data Fetch │───▶│Signal Generate│───▶│ Trade Execute │ │ +│ └─────────────┘ └───────────────┘ └─────────────────┘ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌──────────────┐ ┌─────────────────┐ │ +│ │ Weights │ │ Record Outcome │ │ +│ │ (18 total) │ └─────────────────┘ │ +│ └──────────────┘ │ │ +│ ▲ │ │ +│ │ ▼ │ +│ 
┌───────────────────────────────────┐ │ +│ │ SELF-LEARNING SYSTEM │ │ +│ │ ┌─────────────────────────────┐ │ │ +│ │ │ SignalPerformanceTracker │ │ │ +│ │ │ - Track indicator contrib │ │ │ +│ │ │ - Record signal outcomes │ │ │ +│ │ └─────────────────────────────┘ │ │ +│ │ │ │ │ +│ │ ▼ │ │ +│ │ ┌─────────────────────────────┐ │ │ +│ │ │ AdaptiveWeightAdjuster │ │ │ +│ │ │ - Calculate adjustments │ │ │ +│ │ │ - Apply momentum learning │ │ │ +│ │ └─────────────────────────────┘ │ │ +│ └───────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 11. Enhanced Metrics (`enhanced_metrics.py`) ⭐ NEW + +Comprehensive, informative metrics with indicator-level breakdown: +- **Trade Attribution** - Track which indicators contributed to each trade +- **Regime Analysis** - Performance breakdown by market regime +- **Exit Type Analysis** - Understand where trades close (TP1, TP2, TP3, SL) +- **Actionable Insights** - Get recommendations for improvement + +```python +from back_tester.enhanced_metrics import EnhancedMetricsCalculator + +# Create metrics calculator +calc = EnhancedMetricsCalculator() + +# After running backtests, print detailed report +calc.print_detailed_report(initial_balance=10000, final_balance=10500) + +# Get comprehensive metrics +metrics = calc.calculate_comprehensive_metrics(10000, 10500) +print(f"Indicator Performance: {metrics['indicator_performance']}") +print(f"Regime Performance: {metrics['regime_performance']}") +print(f"Recommendations: {metrics['insights']['recommendations']}") +``` + +## How the Self-Learning System Works + +The self-learning system creates a feedback loop that improves signal quality over time: + +1. **Signal Generation** - When a signal is generated, the system records which indicators contributed to it +2. **Outcome Tracking** - When the trade closes (TP1, TP2, TP3, stop loss, or end of period), the outcome is recorded +3. 
**Attribution Analysis** - The system analyzes which indicators are associated with winning vs losing trades +4. **Weight Adjustment** - Weights for consistently underperforming indicators are reduced, while weights for strong performers are increased +5. **Continuous Improvement** - Over many trades, the system converges on optimal weights for current market conditions + +### Key Components + +| Component | Description | +|-----------|-------------| +| `SignalPerformanceTracker` | Tracks all signals with their outcomes and indicator contributions | +| `AdaptiveWeightAdjuster` | Calculates and applies weight adjustments based on performance | +| `LearningMetricsAnalyzer` | Generates reports and recommendations | +| `SelfLearningBacktester` | Integrates all components for easy use | + +### Example Workflow + +```python +# Step 1: Initialize the learning system +from back_tester import SelfLearningBacktester, backtest_strategy + +learner = SelfLearningBacktester(auto_adjust_weights=True) + +# Step 2: Run multiple backtests to build data +for symbol in ["BTCUSDT", "ETHUSDT", "BNBUSDT"]: + backtest_strategy( + symbol=symbol, + interval="1h", + enable_learning=True, + learner=learner + ) + +# Step 3: View what the system learned +report = learner.get_performance_report() +print(f"Win Rate: {report['summary']['recent_win_rate']:.1f}%") +print(f"Strong Indicators: {report['indicator_analysis']['strong_indicators']}") +print(f"Weak Indicators: {report['indicator_analysis']['weak_indicators']}") + +# Step 4: Get updated weights for future use +new_weights = learner.get_current_weights() +print(f"Optimized weights: {new_weights}") +``` + ## Next Steps for Improvement -1. Implement machine learning for signal enhancement -2. Add cross-validation for more robust parameter testing -3. Implement portfolio-level backtesting for multiple assets -4. Add regime switching capabilities based on market conditions -5. Develop adaptive strategy selection based on market regime +1. 
~~Implement machine learning for signal enhancement~~ ✅ Done +2. ~~Add self-learning weight adjustment~~ ✅ Done +3. ~~Implement indicator performance tracking~~ ✅ Done +4. Add cross-validation for more robust parameter testing +5. Implement portfolio-level backtesting for multiple assets +6. Add regime switching capabilities based on market conditions +7. Develop adaptive strategy selection based on market regime ## Backtesting Launcher Script diff --git a/back_tester/__init__.py b/back_tester/__init__.py index 1d1038b..f787878 100644 --- a/back_tester/__init__.py +++ b/back_tester/__init__.py @@ -2,7 +2,54 @@ Enhanced Backtesting System for Cryptocurrency Trading This package provides comprehensive tools for strategy development, -testing, and optimization for cryptocurrency trading. +testing, optimization, and self-learning for cryptocurrency trading. + +New in v1.1.0: +- Adaptive Learning System: Automatically adjusts signal weights based on performance +- Signal Performance Tracking: Tracks which indicators contribute to winning/losing trades +- Enhanced Metrics: Detailed breakdown by indicator, market regime, and symbol """ -__version__ = "1.0.0" +__version__ = "1.1.0" + +# Core modules +from .strategy import backtest_strategy +from .performance_metrics import calculate_performance_metrics, generate_performance_report + +# Enhanced features +from .enhanced_backtester import EnhancedBacktester, run_enhanced_backtest + +# Self-learning modules +try: + from .adaptive_learning import ( + SelfLearningBacktester, + SignalPerformanceTracker, + AdaptiveWeightAdjuster, + LearningMetricsAnalyzer, + SignalRecord, + SignalOutcome, + extract_indicator_contributions, + ) + from .enhanced_metrics import ( + EnhancedMetricsCalculator, + TradeMetrics, + create_trade_metrics, + ) + LEARNING_AVAILABLE = True +except ImportError: + LEARNING_AVAILABLE = False + +__all__ = [ + # Core + "backtest_strategy", + "calculate_performance_metrics", + "generate_performance_report", + 
"EnhancedBacktester", + "run_enhanced_backtest", + # Learning (if available) + "SelfLearningBacktester", + "SignalPerformanceTracker", + "AdaptiveWeightAdjuster", + "EnhancedMetricsCalculator", + "LEARNING_AVAILABLE", +] diff --git a/back_tester/adaptive_learning.py b/back_tester/adaptive_learning.py new file mode 100644 index 0000000..2211ae1 --- /dev/null +++ b/back_tester/adaptive_learning.py @@ -0,0 +1,845 @@ +""" +Adaptive Learning Module for Backtesting System + +This module provides self-learning capabilities for the backtesting system: +1. SignalPerformanceTracker - Tracks individual signal outcomes with indicator attribution +2. AdaptiveWeightAdjuster - Adjusts weights based on signal performance +3. IndicatorAttributor - Attributes success/failure to specific indicators +4. LearningMetricsAnalyzer - Provides detailed analytics on what works and what doesn't + +The system learns from failures and automatically adjusts coefficients to improve future signals. +""" + +import os +import sys +import json +import numpy as np +import pandas as pd +from typing import Dict, List, Tuple, Any, Optional +from dataclasses import dataclass, field, asdict +from datetime import datetime, timedelta +from collections import defaultdict +import logging +from enum import Enum +import joblib + +logger = logging.getLogger(__name__) + + +class SignalOutcome(Enum): + """Possible outcomes for a trading signal""" + TP1_HIT = "tp1_hit" + TP2_HIT = "tp2_hit" + TP3_HIT = "tp3_hit" + STOP_LOSS = "stop_loss" + TRAILING_STOP = "trailing_stop" + END_OF_PERIOD = "end_of_period" + BREAKEVEN = "breakeven" + PENDING = "pending" + + +@dataclass +class SignalRecord: + """Complete record of a signal with its contributing factors and outcome""" + signal_id: str + timestamp: datetime + symbol: str + interval: str + signal_type: str # Bullish/Bearish + entry_price: float + stop_loss: float + take_profit_1: float + take_profit_2: float + take_profit_3: float + + # Contributing indicators and their 
scores + indicator_contributions: Dict[str, float] = field(default_factory=dict) + + # Reasons that triggered the signal + reasons: List[str] = field(default_factory=list) + + # Market context at signal time + market_regime: str = "" + volatility: float = 0.0 + volume_ratio: float = 1.0 + rsi: float = 50.0 + trend: str = "" + + # Outcome tracking + outcome: SignalOutcome = SignalOutcome.PENDING + exit_price: float = 0.0 + profit_loss: float = 0.0 + profit_loss_percent: float = 0.0 + duration_candles: int = 0 + max_favorable_excursion: float = 0.0 # Max profit during trade + max_adverse_excursion: float = 0.0 # Max loss during trade + + # Weights used at signal time + weights_at_signal: List[float] = field(default_factory=list) + + def is_successful(self) -> bool: + """Determine if the signal was successful""" + return self.outcome in [SignalOutcome.TP1_HIT, SignalOutcome.TP2_HIT, + SignalOutcome.TP3_HIT, SignalOutcome.TRAILING_STOP] + + def to_dict(self) -> Dict: + """Convert to dictionary for storage""" + data = asdict(self) + data['outcome'] = self.outcome.value + data['timestamp'] = self.timestamp.isoformat() + return data + + @classmethod + def from_dict(cls, data: Dict) -> 'SignalRecord': + """Create from dictionary""" + data = data.copy() + data['outcome'] = SignalOutcome(data['outcome']) + data['timestamp'] = datetime.fromisoformat(data['timestamp']) + return cls(**data) + + +class SignalPerformanceTracker: + """ + Tracks individual signal performance and attributes outcomes to specific indicators. + This enables learning from both successes and failures. 
+ """ + + # Weight names for attribution + WEIGHT_NAMES = [ + "W_BULLISH_OB", "W_BEARISH_OB", "W_BULLISH_BREAKER", "W_BEARISH_BREAKER", + "W_ABOVE_SUPPORT", "W_BELOW_RESISTANCE", "W_FVG_ABOVE", "W_FVG_BELOW", + "W_TREND", "W_SWEEP_HIGHS", "W_SWEEP_LOWS", "W_STRUCTURE_BREAK", + "W_PIN_BAR", "W_ENGULFING", "W_LIQUIDITY_POOL_ABOVE", "W_LIQUIDITY_POOL_BELOW", + "W_LIQUIDITY_POOL_ROUND", "W_RSI_EXTREME" + ] + + def __init__(self, storage_path: str = "./data/signal_history"): + self.storage_path = storage_path + os.makedirs(storage_path, exist_ok=True) + + self.signals: List[SignalRecord] = [] + self.performance_by_indicator: Dict[str, Dict] = defaultdict( + lambda: {"wins": 0, "losses": 0, "total_pnl": 0.0, "count": 0} + ) + self.performance_by_market_regime: Dict[str, Dict] = defaultdict( + lambda: {"wins": 0, "losses": 0, "total_pnl": 0.0, "count": 0} + ) + self.performance_by_symbol: Dict[str, Dict] = defaultdict( + lambda: {"wins": 0, "losses": 0, "total_pnl": 0.0, "count": 0} + ) + + self._load_history() + + def _load_history(self): + """Load signal history from storage""" + history_file = os.path.join(self.storage_path, "signal_history.json") + if os.path.exists(history_file): + try: + with open(history_file, 'r') as f: + data = json.load(f) + self.signals = [SignalRecord.from_dict(s) for s in data.get('signals', [])] + self._rebuild_performance_stats() + logger.info(f"Loaded {len(self.signals)} historical signals") + except Exception as e: + logger.warning(f"Failed to load signal history: {e}") + + def _save_history(self): + """Save signal history to storage""" + history_file = os.path.join(self.storage_path, "signal_history.json") + try: + with open(history_file, 'w') as f: + json.dump({ + 'signals': [s.to_dict() for s in self.signals[-10000:]], # Keep last 10k + 'last_updated': datetime.now().isoformat() + }, f, indent=2) + except Exception as e: + logger.error(f"Failed to save signal history: {e}") + + def _rebuild_performance_stats(self): + """Rebuild 
performance statistics from loaded signals""" + for signal in self.signals: + if signal.outcome != SignalOutcome.PENDING: + self._update_stats(signal) + + def _update_stats(self, signal: SignalRecord): + """Update performance statistics for a completed signal""" + is_win = signal.is_successful() + + # Update by indicator + for indicator, contribution in signal.indicator_contributions.items(): + if contribution > 0: + stats = self.performance_by_indicator[indicator] + stats["count"] += 1 + stats["total_pnl"] += signal.profit_loss + if is_win: + stats["wins"] += 1 + else: + stats["losses"] += 1 + + # Update by market regime + regime_stats = self.performance_by_market_regime[signal.market_regime] + regime_stats["count"] += 1 + regime_stats["total_pnl"] += signal.profit_loss + if is_win: + regime_stats["wins"] += 1 + else: + regime_stats["losses"] += 1 + + # Update by symbol + symbol_stats = self.performance_by_symbol[signal.symbol] + symbol_stats["count"] += 1 + symbol_stats["total_pnl"] += signal.profit_loss + if is_win: + symbol_stats["wins"] += 1 + else: + symbol_stats["losses"] += 1 + + def record_signal(self, + signal_id: str, + symbol: str, + interval: str, + signal_type: str, + entry_price: float, + stop_loss: float, + take_profit_1: float, + take_profit_2: float, + take_profit_3: float, + indicator_contributions: Dict[str, float], + reasons: List[str], + market_context: Dict[str, Any], + weights: List[float]) -> SignalRecord: + """Record a new signal with its contributing factors""" + + signal = SignalRecord( + signal_id=signal_id, + timestamp=datetime.now(), + symbol=symbol, + interval=interval, + signal_type=signal_type, + entry_price=entry_price, + stop_loss=stop_loss, + take_profit_1=take_profit_1, + take_profit_2=take_profit_2, + take_profit_3=take_profit_3, + indicator_contributions=indicator_contributions, + reasons=reasons, + market_regime=market_context.get("market_regime", "unknown"), + volatility=market_context.get("volatility", 0.0), + 
volume_ratio=market_context.get("volume_ratio", 1.0), + rsi=market_context.get("rsi", 50.0), + trend=market_context.get("trend", ""), + weights_at_signal=weights.copy() if weights else [] + ) + + self.signals.append(signal) + return signal + + def update_signal_outcome(self, + signal_id: str, + outcome: SignalOutcome, + exit_price: float, + profit_loss: float, + duration_candles: int, + max_favorable: float = 0.0, + max_adverse: float = 0.0): + """Update a signal with its outcome""" + + for signal in reversed(self.signals): # Search from most recent + if signal.signal_id == signal_id: + signal.outcome = outcome + signal.exit_price = exit_price + signal.profit_loss = profit_loss + signal.profit_loss_percent = (exit_price - signal.entry_price) / signal.entry_price * 100 + signal.duration_candles = duration_candles + signal.max_favorable_excursion = max_favorable + signal.max_adverse_excursion = max_adverse + + self._update_stats(signal) + self._save_history() + + logger.info(f"Signal {signal_id} outcome: {outcome.value}, P/L: {profit_loss:.2f}") + return signal + + logger.warning(f"Signal {signal_id} not found for outcome update") + return None + + def get_indicator_performance(self) -> pd.DataFrame: + """Get performance breakdown by indicator""" + data = [] + for indicator, stats in self.performance_by_indicator.items(): + if stats["count"] > 0: + win_rate = stats["wins"] / stats["count"] * 100 + avg_pnl = stats["total_pnl"] / stats["count"] + data.append({ + "indicator": indicator, + "signals": stats["count"], + "wins": stats["wins"], + "losses": stats["losses"], + "win_rate": win_rate, + "total_pnl": stats["total_pnl"], + "avg_pnl": avg_pnl, + "score": win_rate * 0.4 + (avg_pnl / 100) * 0.6 if avg_pnl else win_rate + }) + + df = pd.DataFrame(data) + if not df.empty: + df = df.sort_values("score", ascending=False) + return df + + def get_regime_performance(self) -> pd.DataFrame: + """Get performance breakdown by market regime""" + data = [] + for regime, stats in 
self.performance_by_market_regime.items(): + if stats["count"] > 0: + win_rate = stats["wins"] / stats["count"] * 100 + data.append({ + "regime": regime, + "signals": stats["count"], + "win_rate": win_rate, + "total_pnl": stats["total_pnl"], + "avg_pnl": stats["total_pnl"] / stats["count"] + }) + return pd.DataFrame(data) + + def get_recent_performance(self, n: int = 100) -> Dict[str, Any]: + """Get performance of recent N signals""" + recent = [s for s in self.signals[-n:] if s.outcome != SignalOutcome.PENDING] + + if not recent: + return {"signals": 0, "win_rate": 0, "avg_pnl": 0} + + wins = sum(1 for s in recent if s.is_successful()) + total_pnl = sum(s.profit_loss for s in recent) + + return { + "signals": len(recent), + "wins": wins, + "losses": len(recent) - wins, + "win_rate": wins / len(recent) * 100, + "total_pnl": total_pnl, + "avg_pnl": total_pnl / len(recent), + "avg_duration": sum(s.duration_candles for s in recent) / len(recent) + } + + def identify_weak_indicators(self, min_signals: int = 10, + win_rate_threshold: float = 40.0) -> List[str]: + """Identify indicators that consistently underperform""" + weak = [] + for indicator, stats in self.performance_by_indicator.items(): + if stats["count"] >= min_signals: + win_rate = stats["wins"] / stats["count"] * 100 + if win_rate < win_rate_threshold: + weak.append(indicator) + return weak + + def identify_strong_indicators(self, min_signals: int = 10, + win_rate_threshold: float = 60.0) -> List[str]: + """Identify indicators that consistently outperform""" + strong = [] + for indicator, stats in self.performance_by_indicator.items(): + if stats["count"] >= min_signals: + win_rate = stats["wins"] / stats["count"] * 100 + if win_rate >= win_rate_threshold: + strong.append(indicator) + return strong + + +class AdaptiveWeightAdjuster: + """ + Adjusts signal weights based on historical performance. + Uses online learning to continuously improve signal quality. 
+ """ + + def __init__(self, + performance_tracker: SignalPerformanceTracker, + learning_rate: float = 0.05, + momentum: float = 0.9, + min_weight: float = 0.1, + max_weight: float = 3.0, + decay_factor: float = 0.95, + storage_path: str = "./data/weights"): + + self.tracker = performance_tracker + self.learning_rate = learning_rate + self.momentum = momentum + self.min_weight = min_weight + self.max_weight = max_weight + self.decay_factor = decay_factor # Decay old performance influence + self.storage_path = storage_path + os.makedirs(storage_path, exist_ok=True) + + # Weight velocity for momentum-based updates + self.velocity = defaultdict(float) + + # Historical weight adjustments + self.adjustment_history: List[Dict] = [] + + # Load last known weights + self.current_weights = self._load_weights() + + def _load_weights(self) -> Dict[str, float]: + """Load current weights from storage""" + weights_file = os.path.join(self.storage_path, "adaptive_weights.json") + default_weights = {name: 1.0 for name in SignalPerformanceTracker.WEIGHT_NAMES} + + if os.path.exists(weights_file): + try: + with open(weights_file, 'r') as f: + data = json.load(f) + return data.get('weights', default_weights) + except: + pass + return default_weights + + def _save_weights(self): + """Save current weights to storage""" + weights_file = os.path.join(self.storage_path, "adaptive_weights.json") + with open(weights_file, 'w') as f: + json.dump({ + 'weights': self.current_weights, + 'last_updated': datetime.now().isoformat(), + 'learning_rate': self.learning_rate, + 'adjustment_count': len(self.adjustment_history) + }, f, indent=2) + + def calculate_weight_adjustments(self, + recent_window: int = 100, + min_contribution_threshold: float = 0.1) -> Dict[str, float]: + """ + Calculate weight adjustments based on recent signal performance. 
+ + For each indicator: + - If win rate > 60%: increase weight + - If win rate < 40%: decrease weight + - Scale adjustment by confidence (number of signals) + """ + adjustments = {} + indicator_perf = self.tracker.get_indicator_performance() + + if indicator_perf.empty: + return adjustments + + for _, row in indicator_perf.iterrows(): + indicator = row['indicator'] + if indicator not in self.current_weights: + continue + + signals = row['signals'] + win_rate = row['win_rate'] + avg_pnl = row['avg_pnl'] + + # Calculate confidence based on sample size + confidence = min(1.0, signals / 50) # Full confidence at 50 signals + + # Calculate adjustment direction and magnitude + if win_rate > 60: + # Positive adjustment - increase weight + magnitude = (win_rate - 50) / 100 * confidence + adjustment = magnitude * self.learning_rate + elif win_rate < 40: + # Negative adjustment - decrease weight + magnitude = (50 - win_rate) / 100 * confidence + adjustment = -magnitude * self.learning_rate + else: + adjustment = 0 + + # Factor in P/L performance + if avg_pnl != 0: + pnl_factor = np.clip(avg_pnl / 100, -0.5, 0.5) + adjustment += pnl_factor * 0.5 * self.learning_rate * confidence + + # Apply momentum + self.velocity[indicator] = (self.momentum * self.velocity[indicator] + + (1 - self.momentum) * adjustment) + adjustments[indicator] = self.velocity[indicator] + + return adjustments + + def apply_adjustments(self, + adjustments: Optional[Dict[str, float]] = None, + force: bool = False) -> Dict[str, float]: + """ + Apply calculated adjustments to current weights. 
+ + Args: + adjustments: Optional pre-calculated adjustments + force: If True, apply even small adjustments + + Returns: + Dictionary of new weights + """ + if adjustments is None: + adjustments = self.calculate_weight_adjustments() + + if not adjustments: + return self.current_weights + + old_weights = self.current_weights.copy() + changes_made = False + + for indicator, adjustment in adjustments.items(): + if indicator not in self.current_weights: + continue + + # Only apply significant adjustments unless forced + if not force and abs(adjustment) < 0.01: + continue + + old_weight = self.current_weights[indicator] + new_weight = old_weight + adjustment + + # Clamp to valid range + new_weight = max(self.min_weight, min(self.max_weight, new_weight)) + + if new_weight != old_weight: + self.current_weights[indicator] = new_weight + changes_made = True + logger.info(f"Weight adjusted: {indicator} {old_weight:.3f} -> {new_weight:.3f}") + + if changes_made: + self.adjustment_history.append({ + 'timestamp': datetime.now().isoformat(), + 'old_weights': old_weights, + 'new_weights': self.current_weights.copy(), + 'adjustments': adjustments + }) + self._save_weights() + + return self.current_weights + + def get_weights_as_list(self) -> List[float]: + """Convert weights dictionary to list format for backtesting""" + return [self.current_weights.get(name, 1.0) + for name in SignalPerformanceTracker.WEIGHT_NAMES] + + def reset_weights(self): + """Reset all weights to default values""" + self.current_weights = {name: 1.0 for name in SignalPerformanceTracker.WEIGHT_NAMES} + self.velocity = defaultdict(float) + self._save_weights() + logger.info("Weights reset to defaults") + + def get_adjustment_summary(self) -> Dict[str, Any]: + """Get summary of recent weight adjustments""" + if not self.adjustment_history: + return {"adjustments": 0, "last_adjustment": None} + + recent = self.adjustment_history[-10:] + return { + "total_adjustments": len(self.adjustment_history), + 
"recent_adjustments": len(recent), + "last_adjustment": recent[-1]['timestamp'] if recent else None, + "current_weights": self.current_weights + } + + +class LearningMetricsAnalyzer: + """ + Analyzes learning metrics and provides actionable insights + for improving signal quality. + """ + + def __init__(self, + tracker: SignalPerformanceTracker, + adjuster: AdaptiveWeightAdjuster): + self.tracker = tracker + self.adjuster = adjuster + + def generate_learning_report(self) -> Dict[str, Any]: + """Generate comprehensive learning report""" + report = { + "timestamp": datetime.now().isoformat(), + "summary": {}, + "indicator_analysis": {}, + "regime_analysis": {}, + "recommendations": [] + } + + # Overall summary + recent_perf = self.tracker.get_recent_performance(100) + report["summary"] = { + "total_signals_tracked": len(self.tracker.signals), + "recent_win_rate": recent_perf.get("win_rate", 0), + "recent_avg_pnl": recent_perf.get("avg_pnl", 0), + "weight_adjustments": len(self.adjuster.adjustment_history) + } + + # Indicator analysis + indicator_df = self.tracker.get_indicator_performance() + if not indicator_df.empty: + report["indicator_analysis"] = { + "best_performers": indicator_df.head(5).to_dict('records'), + "worst_performers": indicator_df.tail(5).to_dict('records'), + "weak_indicators": self.tracker.identify_weak_indicators(), + "strong_indicators": self.tracker.identify_strong_indicators() + } + + # Regime analysis + regime_df = self.tracker.get_regime_performance() + if not regime_df.empty: + report["regime_analysis"] = regime_df.to_dict('records') + + # Generate recommendations + report["recommendations"] = self._generate_recommendations() + + return report + + def _generate_recommendations(self) -> List[str]: + """Generate actionable recommendations based on performance data""" + recommendations = [] + + # Check for weak indicators + weak = self.tracker.identify_weak_indicators() + if weak: + recommendations.append( + f"Consider reducing weights for 
underperforming indicators: {', '.join(weak)}" + ) + + # Check for strong indicators + strong = self.tracker.identify_strong_indicators() + if strong: + recommendations.append( + f"High-performing indicators (consider increasing weight): {', '.join(strong)}" + ) + + # Check recent performance trend + recent_perf = self.tracker.get_recent_performance(50) + older_perf = self.tracker.get_recent_performance(200) + + if recent_perf.get("win_rate", 0) < older_perf.get("win_rate", 0) - 10: + recommendations.append( + "Recent performance declining - consider resetting weights or retraining" + ) + + # Regime-specific recommendations + regime_df = self.tracker.get_regime_performance() + if not regime_df.empty: + worst_regime = regime_df.loc[regime_df['win_rate'].idxmin()] + if worst_regime['win_rate'] < 40 and worst_regime['signals'] > 20: + recommendations.append( + f"Poor performance in '{worst_regime['regime']}' regime " + f"({worst_regime['win_rate']:.1f}% win rate) - consider filtering signals" + ) + + return recommendations + + def print_report(self): + """Print formatted learning report to console""" + report = self.generate_learning_report() + + print("\n" + "="*70) + print("📊 ADAPTIVE LEARNING REPORT") + print("="*70) + + print(f"\n📈 Summary:") + print(f" Total signals tracked: {report['summary']['total_signals_tracked']}") + print(f" Recent win rate: {report['summary']['recent_win_rate']:.1f}%") + print(f" Recent avg P/L: ${report['summary']['recent_avg_pnl']:.2f}") + print(f" Weight adjustments made: {report['summary']['weight_adjustments']}") + + if report.get('indicator_analysis', {}).get('best_performers'): + print(f"\n🟢 Top Performing Indicators:") + for ind in report['indicator_analysis']['best_performers'][:3]: + print(f" {ind['indicator']}: {ind['win_rate']:.1f}% win rate, " + f"${ind['avg_pnl']:.2f} avg P/L ({ind['signals']} signals)") + + if report.get('indicator_analysis', {}).get('weak_indicators'): + print(f"\n🔴 Underperforming Indicators:") + for ind 
class SelfLearningBacktester:
    """
    Backtester façade with self-learning capabilities.

    Wires together three collaborators:
      * a ``SignalPerformanceTracker`` that records every signal and its outcome,
      * an ``AdaptiveWeightAdjuster`` that tunes indicator weights from results,
      * a ``LearningMetricsAnalyzer`` that turns both into human-readable reports.

    When ``auto_adjust_weights`` is on, weights are re-tuned automatically after
    every ``adjustment_frequency`` completed signals.
    """

    def __init__(self,
                 storage_path: str = "./data/learning",
                 auto_adjust_weights: bool = True,
                 adjustment_frequency: int = 50):
        # Root directory for all persisted learning state.
        self.storage_path = storage_path
        os.makedirs(storage_path, exist_ok=True)

        # Tracker and adjuster each persist into their own sub-directory.
        self.tracker = SignalPerformanceTracker(
            storage_path=os.path.join(storage_path, "signals")
        )
        self.adjuster = AdaptiveWeightAdjuster(
            performance_tracker=self.tracker,
            storage_path=os.path.join(storage_path, "weights")
        )
        self.analyzer = LearningMetricsAnalyzer(self.tracker, self.adjuster)

        self.auto_adjust = auto_adjust_weights
        self.adjustment_frequency = adjustment_frequency
        # Counts completed signals since the last weight adjustment.
        self.signals_since_adjustment = 0

    def get_current_weights(self) -> List[float]:
        """Return the current adaptive weights as a plain list."""
        return self.adjuster.get_weights_as_list()

    def record_signal_entry(self,
                            signal_id: str,
                            symbol: str,
                            interval: str,
                            signal_type: str,
                            entry_price: float,
                            stop_loss: float,
                            tp1: float, tp2: float, tp3: float,
                            indicator_contributions: Dict[str, float],
                            reasons: List[str],
                            market_context: Dict[str, Any]) -> SignalRecord:
        """Register a freshly generated signal with the performance tracker.

        The weights active at entry time are stored alongside the signal so
        later analysis can attribute outcomes to specific weight settings.
        """
        return self.tracker.record_signal(
            signal_id=signal_id,
            symbol=symbol,
            interval=interval,
            signal_type=signal_type,
            entry_price=entry_price,
            stop_loss=stop_loss,
            take_profit_1=tp1,
            take_profit_2=tp2,
            take_profit_3=tp3,
            indicator_contributions=indicator_contributions,
            reasons=reasons,
            market_context=market_context,
            weights=self.get_current_weights()
        )

    def record_signal_exit(self,
                           signal_id: str,
                           outcome: str,  # "tp1", "tp2", "tp3", "stop_loss", "trailing_stop", "end"
                           exit_price: float,
                           profit_loss: float,
                           duration: int):
        """Record how a signal resolved and trigger learning when due."""
        # Translate the loose outcome string into the tracker's enum;
        # anything unrecognized is treated as an end-of-period exit.
        mapping = {
            "tp1": SignalOutcome.TP1_HIT,
            "tp2": SignalOutcome.TP2_HIT,
            "tp3": SignalOutcome.TP3_HIT,
            "stop_loss": SignalOutcome.STOP_LOSS,
            "trailing_stop": SignalOutcome.TRAILING_STOP,
            "end": SignalOutcome.END_OF_PERIOD
        }
        self.tracker.update_signal_outcome(
            signal_id=signal_id,
            outcome=mapping.get(outcome, SignalOutcome.END_OF_PERIOD),
            exit_price=exit_price,
            profit_loss=profit_loss,
            duration_candles=duration
        )

        self.signals_since_adjustment += 1

        # Re-tune weights once enough fresh outcomes have accumulated.
        if self.auto_adjust and self.signals_since_adjustment >= self.adjustment_frequency:
            self._trigger_learning()
            self.signals_since_adjustment = 0

    def _trigger_learning(self):
        """Run one weight-adjustment pass over recent performance data."""
        logger.info("Triggering adaptive weight adjustment...")

        adjustments = self.adjuster.calculate_weight_adjustments()
        if adjustments:
            self.adjuster.apply_adjustments(adjustments)
            logger.info(f"Applied {len(adjustments)} weight adjustments")

    def force_learning(self):
        """Run a weight adjustment immediately, regardless of the counter."""
        self._trigger_learning()
        self.signals_since_adjustment = 0

    def get_performance_report(self) -> Dict[str, Any]:
        """Return the analyzer's comprehensive performance report."""
        return self.analyzer.generate_learning_report()

    def print_status(self):
        """Print the current learning status to the console."""
        self.analyzer.print_report()

    def save_state(self):
        """Persist tracker history and adjuster weights to disk."""
        self.tracker._save_history()
        self.adjuster._save_weights()
        logger.info("Learning state saved")
# Helper function to extract indicator contributions from signal generation
def extract_indicator_contributions(
    bullish_score: float,
    bearish_score: float,
    signal_type: str,
    reasons: List[str]
) -> Dict[str, float]:
    """
    Determine which indicators contributed to a signal.

    The human-readable ``reasons`` strings are scanned for known keywords;
    every matched indicator is assigned the same share: the winning side's
    score divided by the combined bullish+bearish score.
    """
    contributions: Dict[str, float] = {}

    # Keyword fragments (case-insensitive) mapped to weight names.
    keyword_to_weight = {
        "order block": ("W_BULLISH_OB" if signal_type == "Bullish" else "W_BEARISH_OB"),
        "breaker block": ("W_BULLISH_BREAKER" if signal_type == "Bullish" else "W_BEARISH_BREAKER"),
        "support": "W_ABOVE_SUPPORT",
        "resistance": "W_BELOW_RESISTANCE",
        "FVG above": "W_FVG_ABOVE",
        "FVG below": "W_FVG_BELOW",
        "uptrend": "W_TREND",
        "downtrend": "W_TREND",
        "swept through previous highs": "W_SWEEP_HIGHS",
        "swept through previous lows": "W_SWEEP_LOWS",
        "broke structure": "W_STRUCTURE_BREAK",
        "pin bar": "W_PIN_BAR",
        "engulfing": "W_ENGULFING",
        "liquidity pool": "W_LIQUIDITY_POOL_ABOVE",
        "round number": "W_LIQUIDITY_POOL_ROUND",
        "RSI oversold": "W_RSI_EXTREME",
        "RSI overbought": "W_RSI_EXTREME"
    }

    # The per-indicator share is identical for every match, so compute it once.
    combined = bullish_score + bearish_score
    denominator = combined if combined > 0 else 1
    side_score = bullish_score if signal_type == "Bullish" else bearish_score
    share = side_score / denominator

    for reason in reasons:
        lowered = reason.lower()
        for keyword, weight_name in keyword_to_weight.items():
            if keyword.lower() in lowered:
                contributions[weight_name] = share

    return contributions


# Usage example
if __name__ == "__main__":
    # Initialize self-learning backtester
    learner = SelfLearningBacktester(
        storage_path="./data/learning",
        auto_adjust_weights=True,
        adjustment_frequency=50
    )

    # Get current adaptive weights for signal generation
    weights = learner.get_current_weights()
    print(f"Current weights: {weights}")

    # Print performance status
    learner.print_status()
"""
Enhanced Metrics Module for Backtesting System

Provides comprehensive, informative metrics including:
1. Indicator-level performance breakdown
2. Market regime analysis
3. Signal quality scoring
4. Trade attribution and root cause analysis
5. Learning suggestions based on performance patterns
"""

import numpy as np
import pandas as pd
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass, field
from datetime import datetime
from collections import defaultdict
import json
import os


@dataclass
class TradeMetrics:
    """Detailed metrics for a single completed trade."""
    # Identity
    trade_id: str
    symbol: str
    interval: str
    signal_type: str
    entry_price: float
    exit_price: float
    stop_loss: float
    take_profit_targets: List[float]

    # Outcome: one of tp1, tp2, tp3, stop_loss, trailing_stop, end_of_period
    outcome: str
    profit_loss: float
    profit_loss_percent: float
    risk_reward_achieved: float

    # Duration (candle indices)
    entry_index: int
    exit_index: int
    duration_candles: int

    # Market context at entry
    market_regime: str = ""
    volatility: float = 0.0
    volume_ratio: float = 1.0
    trend: str = ""

    # Indicator contributions to the originating signal
    contributing_indicators: List[str] = field(default_factory=list)
    indicator_scores: Dict[str, float] = field(default_factory=dict)

    # Trade quality metrics
    max_favorable_excursion: float = 0.0  # Best unrealized profit during the trade
    max_adverse_excursion: float = 0.0    # Worst unrealized drawdown during the trade
    r_multiple: float = 0.0               # Actual R achieved

    def is_winner(self) -> bool:
        """A trade counts as a win when it exited via any profit-taking
        mechanism, or when its realized P/L ended positive."""
        winning_exits = ('tp1', 'tp2', 'tp3', 'trailing_stop')
        return self.outcome in winning_exits or self.profit_loss > 0

    def is_full_target(self) -> bool:
        """True only when the final take-profit level (TP3) was reached."""
        return self.outcome == 'tp3'
+ """ + + def __init__(self): + self.trades: List[TradeMetrics] = [] + self.indicator_performance: Dict[str, Dict] = defaultdict( + lambda: { + 'wins': 0, 'losses': 0, 'total_pnl': 0.0, + 'avg_r': 0.0, 'r_values': [], + 'tp1_hits': 0, 'tp2_hits': 0, 'tp3_hits': 0, 'stop_hits': 0 + } + ) + self.regime_performance: Dict[str, Dict] = defaultdict( + lambda: {'wins': 0, 'losses': 0, 'total_pnl': 0.0, 'trades': 0} + ) + self.symbol_performance: Dict[str, Dict] = defaultdict( + lambda: {'wins': 0, 'losses': 0, 'total_pnl': 0.0, 'trades': 0} + ) + self.interval_performance: Dict[str, Dict] = defaultdict( + lambda: {'wins': 0, 'losses': 0, 'total_pnl': 0.0, 'trades': 0} + ) + + def add_trade(self, trade: TradeMetrics): + """Add a completed trade to metrics""" + self.trades.append(trade) + self._update_indicator_stats(trade) + self._update_regime_stats(trade) + self._update_symbol_stats(trade) + self._update_interval_stats(trade) + + def _update_indicator_stats(self, trade: TradeMetrics): + """Update indicator-level statistics""" + is_win = trade.is_winner() + + for indicator in trade.contributing_indicators: + stats = self.indicator_performance[indicator] + if is_win: + stats['wins'] += 1 + else: + stats['losses'] += 1 + + stats['total_pnl'] += trade.profit_loss + stats['r_values'].append(trade.r_multiple) + + # Track specific outcomes + if trade.outcome == 'tp1': + stats['tp1_hits'] += 1 + elif trade.outcome == 'tp2': + stats['tp2_hits'] += 1 + elif trade.outcome == 'tp3': + stats['tp3_hits'] += 1 + elif trade.outcome in ['stop_loss', 'trailing_stop']: + stats['stop_hits'] += 1 + + def _update_regime_stats(self, trade: TradeMetrics): + """Update market regime statistics""" + stats = self.regime_performance[trade.market_regime or 'unknown'] + stats['trades'] += 1 + stats['total_pnl'] += trade.profit_loss + if trade.is_winner(): + stats['wins'] += 1 + else: + stats['losses'] += 1 + + def _update_symbol_stats(self, trade: TradeMetrics): + """Update symbol-level statistics""" 
+ stats = self.symbol_performance[trade.symbol] + stats['trades'] += 1 + stats['total_pnl'] += trade.profit_loss + if trade.is_winner(): + stats['wins'] += 1 + else: + stats['losses'] += 1 + + def _update_interval_stats(self, trade: TradeMetrics): + """Update interval/timeframe statistics""" + stats = self.interval_performance[trade.interval] + stats['trades'] += 1 + stats['total_pnl'] += trade.profit_loss + if trade.is_winner(): + stats['wins'] += 1 + else: + stats['losses'] += 1 + + def calculate_comprehensive_metrics(self, + initial_balance: float, + final_balance: float) -> Dict[str, Any]: + """Calculate comprehensive performance metrics with full breakdown""" + + if not self.trades: + return self._empty_metrics() + + # Basic metrics + total_trades = len(self.trades) + winners = [t for t in self.trades if t.is_winner()] + losers = [t for t in self.trades if not t.is_winner()] + + win_rate = len(winners) / total_trades * 100 if total_trades > 0 else 0 + + # P/L calculations + total_profit = sum(t.profit_loss for t in winners) + total_loss = sum(t.profit_loss for t in losers) + net_profit = total_profit + total_loss + + # Profit factor + profit_factor = abs(total_profit / total_loss) if total_loss != 0 else float('inf') + + # R-multiple statistics + r_multiples = [t.r_multiple for t in self.trades if t.r_multiple != 0] + avg_r = np.mean(r_multiples) if r_multiples else 0 + + # Drawdown calculation + equity_curve = self._calculate_equity_curve(initial_balance) + max_drawdown = self._calculate_max_drawdown(equity_curve) + + # Expectancy + avg_win = np.mean([t.profit_loss for t in winners]) if winners else 0 + avg_loss = np.mean([t.profit_loss for t in losers]) if losers else 0 + expectancy = (win_rate/100 * avg_win) + ((1 - win_rate/100) * avg_loss) + + # Exit type breakdown + exit_breakdown = self._calculate_exit_breakdown() + + # Indicator performance breakdown + indicator_breakdown = self._calculate_indicator_breakdown() + + # Regime performance breakdown + 
regime_breakdown = self._calculate_regime_breakdown() + + # Symbol performance breakdown + symbol_breakdown = self._calculate_symbol_breakdown() + + # Generate insights and recommendations + insights = self._generate_insights() + + return { + # Summary + 'summary': { + 'total_trades': total_trades, + 'winners': len(winners), + 'losers': len(losers), + 'win_rate': win_rate, + 'profit_factor': profit_factor, + 'net_profit': net_profit, + 'total_return_pct': (final_balance - initial_balance) / initial_balance * 100, + 'max_drawdown_pct': max_drawdown, + 'expectancy': expectancy, + 'avg_r_multiple': avg_r + }, + + # Detailed trade statistics + 'trade_stats': { + 'avg_winner': avg_win, + 'avg_loser': avg_loss, + 'largest_winner': max((t.profit_loss for t in winners), default=0), + 'largest_loser': min((t.profit_loss for t in losers), default=0), + 'avg_duration_candles': np.mean([t.duration_candles for t in self.trades]), + 'avg_mfe': np.mean([t.max_favorable_excursion for t in self.trades]), + 'avg_mae': np.mean([t.max_adverse_excursion for t in self.trades]) + }, + + # Exit type breakdown + 'exit_breakdown': exit_breakdown, + + # Indicator performance + 'indicator_performance': indicator_breakdown, + + # Market regime performance + 'regime_performance': regime_breakdown, + + # Symbol performance + 'symbol_performance': symbol_breakdown, + + # Timeframe performance + 'interval_performance': self._calculate_interval_breakdown(), + + # Learning insights + 'insights': insights, + + # Equity curve for visualization + 'equity_curve': equity_curve + } + + def _empty_metrics(self) -> Dict[str, Any]: + """Return empty metrics structure""" + return { + 'summary': { + 'total_trades': 0, 'win_rate': 0, 'profit_factor': 0, + 'net_profit': 0, 'max_drawdown_pct': 0, 'expectancy': 0 + }, + 'trade_stats': {}, + 'exit_breakdown': {}, + 'indicator_performance': {}, + 'regime_performance': {}, + 'symbol_performance': {}, + 'interval_performance': {}, + 'insights': {'recommendations': 
['Insufficient data for analysis']}, + 'equity_curve': [] + } + + def _calculate_equity_curve(self, initial_balance: float) -> List[float]: + """Calculate cumulative equity curve""" + equity = [initial_balance] + for trade in self.trades: + equity.append(equity[-1] + trade.profit_loss) + return equity + + def _calculate_max_drawdown(self, equity_curve: List[float]) -> float: + """Calculate maximum drawdown percentage""" + if not equity_curve: + return 0 + + peak = equity_curve[0] + max_dd = 0 + + for equity in equity_curve: + if equity > peak: + peak = equity + dd = (peak - equity) / peak * 100 if peak > 0 else 0 + max_dd = max(max_dd, dd) + + return max_dd + + def _calculate_exit_breakdown(self) -> Dict[str, Dict]: + """Calculate breakdown by exit type""" + breakdown = defaultdict(lambda: {'count': 0, 'total_pnl': 0.0, 'avg_pnl': 0.0}) + + for trade in self.trades: + stats = breakdown[trade.outcome] + stats['count'] += 1 + stats['total_pnl'] += trade.profit_loss + + for outcome, stats in breakdown.items(): + if stats['count'] > 0: + stats['avg_pnl'] = stats['total_pnl'] / stats['count'] + stats['percentage'] = stats['count'] / len(self.trades) * 100 + + return dict(breakdown) + + def _calculate_indicator_breakdown(self) -> Dict[str, Dict]: + """Calculate detailed indicator performance breakdown""" + breakdown = {} + + for indicator, stats in self.indicator_performance.items(): + total = stats['wins'] + stats['losses'] + if total == 0: + continue + + win_rate = stats['wins'] / total * 100 + avg_r = np.mean(stats['r_values']) if stats['r_values'] else 0 + + # Calculate score for ranking + # Score = win_rate * 0.4 + normalized_pnl * 0.3 + avg_r * 0.3 + pnl_score = min(1.0, max(-1.0, stats['total_pnl'] / 1000)) # Normalize to -1 to 1 + r_score = min(1.0, max(-1.0, avg_r / 3)) # Normalize R to -1 to 1 + score = win_rate / 100 * 0.4 + (pnl_score + 1) / 2 * 0.3 + (r_score + 1) / 2 * 0.3 + + breakdown[indicator] = { + 'total_signals': total, + 'wins': stats['wins'], + 
'losses': stats['losses'], + 'win_rate': win_rate, + 'total_pnl': stats['total_pnl'], + 'avg_pnl': stats['total_pnl'] / total, + 'avg_r': avg_r, + 'tp1_hits': stats['tp1_hits'], + 'tp2_hits': stats['tp2_hits'], + 'tp3_hits': stats['tp3_hits'], + 'stop_hits': stats['stop_hits'], + 'score': score, + 'status': self._get_indicator_status(win_rate, stats['total_pnl']) + } + + # Sort by score + return dict(sorted(breakdown.items(), key=lambda x: x[1]['score'], reverse=True)) + + def _get_indicator_status(self, win_rate: float, total_pnl: float) -> str: + """Get indicator health status""" + if win_rate >= 60 and total_pnl > 0: + return 'excellent' + elif win_rate >= 50 and total_pnl >= 0: + return 'good' + elif win_rate >= 40: + return 'average' + else: + return 'poor' + + def _calculate_regime_breakdown(self) -> Dict[str, Dict]: + """Calculate performance by market regime""" + breakdown = {} + + for regime, stats in self.regime_performance.items(): + if stats['trades'] == 0: + continue + + win_rate = stats['wins'] / stats['trades'] * 100 + breakdown[regime] = { + 'trades': stats['trades'], + 'wins': stats['wins'], + 'losses': stats['losses'], + 'win_rate': win_rate, + 'total_pnl': stats['total_pnl'], + 'avg_pnl': stats['total_pnl'] / stats['trades'], + 'recommendation': self._get_regime_recommendation(regime, win_rate, stats['total_pnl']) + } + + return breakdown + + def _get_regime_recommendation(self, regime: str, win_rate: float, pnl: float) -> str: + """Get recommendation for trading in specific regime""" + if win_rate >= 55 and pnl > 0: + return f"Good performance in {regime} - continue trading" + elif win_rate < 40 or pnl < 0: + return f"Poor performance in {regime} - consider filtering signals or reducing position size" + else: + return f"Average performance in {regime} - monitor closely" + + def _calculate_symbol_breakdown(self) -> Dict[str, Dict]: + """Calculate performance by symbol""" + breakdown = {} + + for symbol, stats in self.symbol_performance.items(): + 
if stats['trades'] == 0: + continue + + win_rate = stats['wins'] / stats['trades'] * 100 + breakdown[symbol] = { + 'trades': stats['trades'], + 'wins': stats['wins'], + 'losses': stats['losses'], + 'win_rate': win_rate, + 'total_pnl': stats['total_pnl'], + 'avg_pnl': stats['total_pnl'] / stats['trades'] + } + + return dict(sorted(breakdown.items(), key=lambda x: x[1]['total_pnl'], reverse=True)) + + def _calculate_interval_breakdown(self) -> Dict[str, Dict]: + """Calculate performance by timeframe""" + breakdown = {} + + for interval, stats in self.interval_performance.items(): + if stats['trades'] == 0: + continue + + win_rate = stats['wins'] / stats['trades'] * 100 + breakdown[interval] = { + 'trades': stats['trades'], + 'wins': stats['wins'], + 'losses': stats['losses'], + 'win_rate': win_rate, + 'total_pnl': stats['total_pnl'], + 'avg_pnl': stats['total_pnl'] / stats['trades'] + } + + return breakdown + + def _generate_insights(self) -> Dict[str, Any]: + """Generate actionable insights from performance data""" + insights = { + 'strong_indicators': [], + 'weak_indicators': [], + 'best_regimes': [], + 'worst_regimes': [], + 'recommendations': [] + } + + # Indicator insights + for indicator, stats in self._calculate_indicator_breakdown().items(): + if stats['total_signals'] >= 10: # Minimum sample size + if stats['status'] == 'excellent': + insights['strong_indicators'].append({ + 'name': indicator, + 'win_rate': stats['win_rate'], + 'avg_pnl': stats['avg_pnl'] + }) + elif stats['status'] == 'poor': + insights['weak_indicators'].append({ + 'name': indicator, + 'win_rate': stats['win_rate'], + 'avg_pnl': stats['avg_pnl'] + }) + + # Regime insights + regime_breakdown = self._calculate_regime_breakdown() + sorted_regimes = sorted(regime_breakdown.items(), + key=lambda x: x[1]['win_rate'], reverse=True) + + if sorted_regimes: + insights['best_regimes'] = [sorted_regimes[0][0]] if sorted_regimes else [] + insights['worst_regimes'] = [sorted_regimes[-1][0]] if 
len(sorted_regimes) > 1 else [] + + # Generate recommendations + if insights['weak_indicators']: + weak_names = [i['name'] for i in insights['weak_indicators'][:3]] + insights['recommendations'].append( + f"Consider reducing weights for: {', '.join(weak_names)}" + ) + + if insights['strong_indicators']: + strong_names = [i['name'] for i in insights['strong_indicators'][:3]] + insights['recommendations'].append( + f"Consider increasing weights for: {', '.join(strong_names)}" + ) + + if insights['worst_regimes']: + worst_regime = insights['worst_regimes'][0] + regime_stats = regime_breakdown.get(worst_regime, {}) + if regime_stats.get('win_rate', 100) < 40: + insights['recommendations'].append( + f"Consider avoiding signals in '{worst_regime}' market regime" + ) + + # Check for recent performance decline + if len(self.trades) >= 50: + recent_trades = self.trades[-25:] + older_trades = self.trades[-50:-25] + + recent_win_rate = sum(1 for t in recent_trades if t.is_winner()) / len(recent_trades) * 100 + older_win_rate = sum(1 for t in older_trades if t.is_winner()) / len(older_trades) * 100 + + if recent_win_rate < older_win_rate - 10: + insights['recommendations'].append( + f"Performance declining: Recent win rate {recent_win_rate:.1f}% vs " + f"previous {older_win_rate:.1f}%. Consider retraining weights." 
+ ) + + return insights + + def print_detailed_report(self, initial_balance: float, final_balance: float): + """Print detailed performance report""" + metrics = self.calculate_comprehensive_metrics(initial_balance, final_balance) + + print("\n" + "="*80) + print("📊 ENHANCED PERFORMANCE REPORT") + print("="*80) + + # Summary + s = metrics['summary'] + print(f"\n📈 SUMMARY") + print(f" Total Trades: {s['total_trades']}") + print(f" Win Rate: {s['win_rate']:.1f}% ({s['winners']}W / {s['losers']}L)") + print(f" Profit Factor: {s['profit_factor']:.2f}") + print(f" Net Profit: ${s['net_profit']:.2f}") + print(f" Total Return: {s['total_return_pct']:.2f}%") + print(f" Max Drawdown: {s['max_drawdown_pct']:.2f}%") + print(f" Expectancy: ${s['expectancy']:.2f}") + print(f" Avg R-Multiple: {s['avg_r_multiple']:.2f}R") + + # Exit breakdown + if metrics['exit_breakdown']: + print(f"\n📤 EXIT TYPE BREAKDOWN") + for exit_type, stats in metrics['exit_breakdown'].items(): + print(f" {exit_type}: {stats['count']} ({stats['percentage']:.1f}%) | " + f"Avg P/L: ${stats['avg_pnl']:.2f}") + + # Indicator performance + if metrics['indicator_performance']: + print(f"\n🎯 INDICATOR PERFORMANCE (Top 5)") + for i, (indicator, stats) in enumerate(list(metrics['indicator_performance'].items())[:5]): + status_emoji = {'excellent': '🟢', 'good': '🟡', 'average': '🟠', 'poor': '🔴'} + emoji = status_emoji.get(stats['status'], '⚪') + print(f" {emoji} {indicator}: {stats['win_rate']:.1f}% WR | " + f"${stats['avg_pnl']:.2f} avg | {stats['avg_r']:.2f}R | " + f"TP1:{stats['tp1_hits']} TP2:{stats['tp2_hits']} TP3:{stats['tp3_hits']} SL:{stats['stop_hits']}") + + # Weak indicators + weak_indicators = [ind for ind, stats in metrics['indicator_performance'].items() + if stats['status'] == 'poor'] + if weak_indicators: + print(f"\n🔴 UNDERPERFORMING INDICATORS") + for ind in weak_indicators[:5]: + stats = metrics['indicator_performance'][ind] + print(f" {ind}: {stats['win_rate']:.1f}% WR, ${stats['total_pnl']:.2f} 
total") + + # Regime performance + if metrics['regime_performance']: + print(f"\n🌍 MARKET REGIME PERFORMANCE") + for regime, stats in metrics['regime_performance'].items(): + print(f" {regime}: {stats['win_rate']:.1f}% WR ({stats['trades']} trades) | " + f"${stats['total_pnl']:.2f}") + + # Recommendations + if metrics['insights']['recommendations']: + print(f"\n💡 RECOMMENDATIONS") + for rec in metrics['insights']['recommendations']: + print(f" • {rec}") + + print("\n" + "="*80) + + return metrics + + +def create_trade_metrics( + trade_log: List[Dict], + entry_trade: Dict, + exit_trades: List[Dict], + market_context: Dict[str, Any], + indicator_contributions: Dict[str, float] +) -> TradeMetrics: + """ + Helper to create TradeMetrics from trade log data. + + Args: + trade_log: Full trade log + entry_trade: Entry trade dictionary + exit_trades: List of exit trades for this entry + market_context: Market context at entry + indicator_contributions: Indicator contributions to signal + """ + + # Calculate final outcome + if not exit_trades: + outcome = 'end_of_period' + exit_price = entry_trade['price'] + profit_loss = 0 + else: + last_exit = exit_trades[-1] + outcome = last_exit.get('type', 'end_of_period') + # Map trade types to outcome strings + outcome_map = { + 'take_profit_1': 'tp1', + 'take_profit_2': 'tp2', + 'take_profit_3': 'tp3', + 'stop_loss': 'stop_loss', + 'trailing_stop': 'trailing_stop', + 'exit_end_of_period': 'end_of_period' + } + outcome = outcome_map.get(outcome, outcome) + exit_price = last_exit.get('price', entry_trade['price']) + profit_loss = sum(t.get('profit', 0) for t in exit_trades) + + # Calculate R-multiple + risk = abs(entry_trade['price'] - entry_trade.get('stop_loss', entry_trade['price'])) + r_multiple = profit_loss / risk if risk > 0 else 0 + + return TradeMetrics( + trade_id=entry_trade.get('trade_id', str(entry_trade.get('index', 0))), + symbol=entry_trade.get('symbol', 'UNKNOWN'), + interval=entry_trade.get('interval', '1h'), + 
def create_trade_metrics(
    trade_log: List[Dict],
    entry_trade: Dict,
    exit_trades: List[Dict],
    market_context: Dict[str, Any],
    indicator_contributions: Dict[str, float]
) -> TradeMetrics:
    """
    Build a TradeMetrics record from raw trade-log dictionaries.

    Args:
        trade_log: Full trade log
        entry_trade: Entry trade dictionary
        exit_trades: List of exit trades for this entry
        market_context: Market context at entry
        indicator_contributions: Indicator contributions to signal
    """
    # Resolve the final outcome from the last exit event (if any).
    if not exit_trades:
        outcome = 'end_of_period'
        exit_price = entry_trade['price']
        profit_loss = 0
    else:
        final_exit = exit_trades[-1]
        raw_outcome = final_exit.get('type', 'end_of_period')
        # Map trade-log event types to canonical outcome strings;
        # unknown types pass through unchanged.
        outcome = {
            'take_profit_1': 'tp1',
            'take_profit_2': 'tp2',
            'take_profit_3': 'tp3',
            'stop_loss': 'stop_loss',
            'trailing_stop': 'trailing_stop',
            'exit_end_of_period': 'end_of_period'
        }.get(raw_outcome, raw_outcome)
        exit_price = final_exit.get('price', entry_trade['price'])
        profit_loss = sum(t.get('profit', 0) for t in exit_trades)

    # R-multiple: realized P/L relative to the initially risked distance.
    risk = abs(entry_trade['price'] - entry_trade.get('stop_loss', entry_trade['price']))
    r_multiple = profit_loss / risk if risk > 0 else 0

    return TradeMetrics(
        trade_id=entry_trade.get('trade_id', str(entry_trade.get('index', 0))),
        symbol=entry_trade.get('symbol', 'UNKNOWN'),
        interval=entry_trade.get('interval', '1h'),
        signal_type=entry_trade.get('signal', '').split()[0] if entry_trade.get('signal') else 'Unknown',
        entry_price=entry_trade['price'],
        exit_price=exit_price,
        stop_loss=entry_trade.get('stop_loss', 0),
        take_profit_targets=[
            entry_trade.get('take_profit_1', 0),
            entry_trade.get('take_profit_2', 0),
            entry_trade.get('take_profit_3', 0)
        ],
        outcome=outcome,
        profit_loss=profit_loss,
        profit_loss_percent=(exit_price - entry_trade['price']) / entry_trade['price'] * 100 if entry_trade['price'] > 0 else 0,
        risk_reward_achieved=abs(profit_loss / risk) if risk > 0 else 0,
        entry_index=entry_trade.get('index', 0),
        exit_index=exit_trades[-1].get('index', 0) if exit_trades else entry_trade.get('index', 0),
        duration_candles=exit_trades[-1].get('index', 0) - entry_trade.get('index', 0) if exit_trades else 0,
        market_regime=market_context.get('market_regime', ''),
        volatility=market_context.get('volatility', 0),
        volume_ratio=market_context.get('volume_ratio', 1),
        trend=market_context.get('trend', ''),
        contributing_indicators=list(indicator_contributions.keys()),
        indicator_scores=indicator_contributions,
        r_multiple=r_multiple
    )


# Usage example
if __name__ == "__main__":
    # Create calculator
    calc = EnhancedMetricsCalculator()

    # Example trade (normally these would come from backtest)
    example_trade = TradeMetrics(
        trade_id="test_1",
        symbol="BTCUSDT",
        interval="1h",
        signal_type="Bullish",
        entry_price=50000,
        exit_price=51500,
        stop_loss=49000,
        take_profit_targets=[51000, 52000, 53000],
        outcome="tp2",
        profit_loss=150,
        profit_loss_percent=3.0,
        risk_reward_achieved=1.5,
        entry_index=100,
        exit_index=115,
        duration_candles=15,
        market_regime="trending_up",
        volatility=0.02,
        volume_ratio=1.2,
        trend="uptrend",
        contributing_indicators=["W_BULLISH_OB", "W_TREND", "W_STRUCTURE_BREAK"],
        indicator_scores={"W_BULLISH_OB": 1.2, "W_TREND": 0.8, "W_STRUCTURE_BREAK": 1.5},
        r_multiple=1.5
    )

    calc.add_trade(example_trade)
    calc.print_detailed_report(10000, 10150)
# Trading universe and timeframes used for randomized evaluation runs.
SYMBOLS = ["BTCUSDT", "ETHUSDT"]
INTERVALS = ["5m", "15m", "30m", "1h", "4h"]

# Names of the 18 signal weights, index-aligned with TradingParameters.weights.
WEIGHT_NAMES = [
    "W_BULLISH_OB", "W_BEARISH_OB", "W_BULLISH_BREAKER", "W_BEARISH_BREAKER",
    "W_ABOVE_SUPPORT", "W_BELOW_RESISTANCE", "W_FVG_ABOVE", "W_FVG_BELOW",
    "W_TREND", "W_SWEEP_HIGHS", "W_SWEEP_LOWS", "W_STRUCTURE_BREAK",
    "W_PIN_BAR", "W_ENGULFING", "W_LIQUIDITY_POOL_ABOVE", "W_LIQUIDITY_POOL_BELOW",
    "W_LIQUIDITY_POOL_ROUND", "W_RSI_EXTREME"
]

# Colors
class C:
    """ANSI escape codes for colored console output."""
    H = '\033[95m'
    B = '\033[94m'
    C = '\033[96m'
    G = '\033[92m'
    Y = '\033[93m'
    R = '\033[91m'
    E = '\033[0m'
    BOLD = '\033[1m'


@dataclass
class TradingParameters:
    """All optimizable trading parameters.

    Invariant: take-profit ratios are strictly ordered,
    ``tp1_ratio < tp2_ratio < tp3_ratio`` (each subsequent target is at
    least 0.5R beyond the previous one). ``random()`` and ``mutate()``
    enforce this; direct construction is the caller's responsibility.
    """

    # Signal weights (18), index-aligned with WEIGHT_NAMES.
    weights: List[float]

    # Stop Loss Parameters
    atr_multiplier: float = 2.0   # ATR multiplier for stop loss distance
    min_sl_percent: float = 0.5   # Minimum SL distance as % of entry

    # Take Profit R:R Ratios
    tp1_ratio: float = 1.5        # Risk:Reward for TP1
    tp2_ratio: float = 2.5        # Risk:Reward for TP2
    tp3_ratio: float = 4.0        # Risk:Reward for TP3

    # Trailing Stop Parameters
    trailing_stop_distance: float = 0.5  # Trailing stop distance %
    trailing_activation_tp: int = 1      # Activate trailing at TP1(1), TP2(2), or TP3(3)

    # Position Sizing
    risk_percentage: float = 1.0  # Risk % per trade

    @classmethod
    def default(cls) -> 'TradingParameters':
        """Create with default values."""
        return cls(
            weights=[
                1.2, 1.2, 1.0, 1.0,  # Order blocks, breakers
                0.8, 0.8, 0.6, 0.6,  # Support/resistance, FVG
                1.0, 1.3, 1.3, 1.5,  # Trend, sweeps, structure
                0.7, 0.7, 1.0, 1.0,  # Patterns, liquidity
                1.2, 0.5             # Round numbers, RSI
            ]
        )

    @classmethod
    def random(cls) -> 'TradingParameters':
        """Create with random values for exploration.

        BUGFIX: TP ratio ranges overlap (e.g. tp1 can draw 2.4 while tp2
        draws 2.0), so independent draws could violate the documented
        TP1 < TP2 < TP3 ordering. Each higher target is now floored at
        0.5R above the previous one.
        """
        tp1 = random.uniform(1.0, 2.5)
        tp2 = max(random.uniform(2.0, 4.0), tp1 + 0.5)
        tp3 = max(random.uniform(3.0, 6.0), tp2 + 0.5)
        return cls(
            weights=[random.uniform(0.3, 2.0) for _ in range(18)],
            atr_multiplier=random.uniform(1.0, 4.0),
            min_sl_percent=random.uniform(0.3, 1.5),
            tp1_ratio=tp1,
            tp2_ratio=tp2,
            tp3_ratio=tp3,
            trailing_stop_distance=random.uniform(0.3, 1.5),
            trailing_activation_tp=random.choice([1, 2]),
            risk_percentage=random.uniform(0.5, 2.0)
        )

    def mutate(self, mutation_rate: float = 0.3, mutation_strength: float = 0.15) -> 'TradingParameters':
        """Return a mutated copy of these parameters.

        Each field independently mutates with probability ``mutation_rate``
        via a Gaussian step, clamped to its valid range.

        BUGFIX: previously the TP1 < TP2 < TP3 ordering was only enforced
        for a target when that target itself was mutated, so mutating only
        ``tp1_ratio`` upward (up to 3.0) could push it past an unmutated
        ``tp2_ratio``. The ordering is now re-established after mutation.
        """
        new_params = copy.deepcopy(self)

        # Mutate weights, clamped to [0.1, 3.0].
        for i in range(len(new_params.weights)):
            if random.random() < mutation_rate:
                change = np.random.normal(0, mutation_strength)
                new_params.weights[i] = max(0.1, min(3.0, new_params.weights[i] + change))

        # Mutate SL parameters
        if random.random() < mutation_rate:
            new_params.atr_multiplier = max(0.5, min(5.0,
                new_params.atr_multiplier + np.random.normal(0, 0.3)))

        if random.random() < mutation_rate:
            new_params.min_sl_percent = max(0.2, min(2.0,
                new_params.min_sl_percent + np.random.normal(0, 0.2)))

        # Mutate TP ratios (ordering re-established below)
        if random.random() < mutation_rate:
            new_params.tp1_ratio = max(0.8, min(3.0,
                new_params.tp1_ratio + np.random.normal(0, 0.3)))

        if random.random() < mutation_rate:
            new_params.tp2_ratio = max(new_params.tp1_ratio + 0.5, min(5.0,
                new_params.tp2_ratio + np.random.normal(0, 0.4)))

        if random.random() < mutation_rate:
            new_params.tp3_ratio = max(new_params.tp2_ratio + 0.5, min(8.0,
                new_params.tp3_ratio + np.random.normal(0, 0.5)))

        # Enforce the documented invariant TP1 < TP2 < TP3 even when only a
        # lower target was mutated upward past an unmutated higher one.
        new_params.tp2_ratio = max(new_params.tp2_ratio, new_params.tp1_ratio + 0.5)
        new_params.tp3_ratio = max(new_params.tp3_ratio, new_params.tp2_ratio + 0.5)

        # Mutate trailing stop
        if random.random() < mutation_rate:
            new_params.trailing_stop_distance = max(0.2, min(2.0,
                new_params.trailing_stop_distance + np.random.normal(0, 0.2)))

        if random.random() < mutation_rate * 0.5:  # Less frequent
            new_params.trailing_activation_tp = random.choice([1, 2])

        # Mutate risk
        if random.random() < mutation_rate:
            new_params.risk_percentage = max(0.25, min(3.0,
                new_params.risk_percentage + np.random.normal(0, 0.2)))

        return new_params

    @staticmethod
    def crossover(parent1: 'TradingParameters', parent2: 'TradingParameters') -> 'TradingParameters':
        """Create a child by uniform crossover of two parents.

        Weights are inherited gene-by-gene; scalar parameters are inherited
        whole from a randomly chosen parent.
        """
        child_weights = [
            parent1.weights[i] if random.random() < 0.5 else parent2.weights[i]
            for i in range(len(parent1.weights))
        ]

        return TradingParameters(
            weights=child_weights,
            atr_multiplier=random.choice([parent1.atr_multiplier, parent2.atr_multiplier]),
            min_sl_percent=random.choice([parent1.min_sl_percent, parent2.min_sl_percent]),
            tp1_ratio=random.choice([parent1.tp1_ratio, parent2.tp1_ratio]),
            tp2_ratio=random.choice([parent1.tp2_ratio, parent2.tp2_ratio]),
            tp3_ratio=random.choice([parent1.tp3_ratio, parent2.tp3_ratio]),
            trailing_stop_distance=random.choice([parent1.trailing_stop_distance, parent2.trailing_stop_distance]),
            trailing_activation_tp=random.choice([parent1.trailing_activation_tp, parent2.trailing_activation_tp]),
            risk_percentage=random.choice([parent1.risk_percentage, parent2.risk_percentage]),
        )

    def to_dict(self) -> Dict:
        """Serialize to a plain dictionary (for JSON checkpoints)."""
        return {
            'weights': self.weights,
            'weight_names': WEIGHT_NAMES,
            'atr_multiplier': self.atr_multiplier,
            'min_sl_percent': self.min_sl_percent,
            'tp1_ratio': self.tp1_ratio,
            'tp2_ratio': self.tp2_ratio,
            'tp3_ratio': self.tp3_ratio,
            'trailing_stop_distance': self.trailing_stop_distance,
            'trailing_activation_tp': self.trailing_activation_tp,
            'risk_percentage': self.risk_percentage,
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'TradingParameters':
        """Deserialize from a dictionary, falling back to defaults per field."""
        return cls(
            weights=data.get('weights', cls.default().weights),
            atr_multiplier=data.get('atr_multiplier', 2.0),
            min_sl_percent=data.get('min_sl_percent', 0.5),
            tp1_ratio=data.get('tp1_ratio', 1.5),
            tp2_ratio=data.get('tp2_ratio', 2.5),
            tp3_ratio=data.get('tp3_ratio', 4.0),
            trailing_stop_distance=data.get('trailing_stop_distance', 0.5),
            trailing_activation_tp=data.get('trailing_activation_tp', 1),
            risk_percentage=data.get('risk_percentage', 1.0),
        )
+ """ + + def __init__( + self, + storage_path: str = "./data/intensive_training", + initial_balance: float = 10000.0, + population_size: int = 8, + elite_count: int = 2, + tests_per_evaluation: int = 10, + ): + self.storage_path = storage_path + self.initial_balance = initial_balance + self.population_size = population_size + self.elite_count = elite_count + self.tests_per_evaluation = tests_per_evaluation + + os.makedirs(storage_path, exist_ok=True) + os.makedirs(os.path.join(storage_path, "checkpoints"), exist_ok=True) + + # Load or initialize parameters + self.best_params = self._load_params() + self.best_fitness = float('-inf') + + # Tracking + self.generation = 0 + self.total_backtests = 0 + self.total_trades = 0 + self.start_time = None + + # History + self.fitness_history = [] + self.param_history = [] + + def _load_params(self) -> TradingParameters: + """Load parameters from file or use defaults""" + params_file = os.path.join(self.storage_path, "best_params.json") + + if os.path.exists(params_file): + try: + with open(params_file, 'r') as f: + data = json.load(f) + params = TradingParameters.from_dict(data) + print(f"{C.G}✓ Loaded existing parameters from checkpoint{C.E}") + return params + except Exception as e: + print(f"{C.Y}Warning: Could not load params: {e}{C.E}") + + return TradingParameters.default() + + def _save_params(self, params: TradingParameters, fitness: float, is_best: bool = False): + """Save parameters to file""" + data = params.to_dict() + data['fitness'] = fitness + data['generation'] = self.generation + data['total_backtests'] = self.total_backtests + data['timestamp'] = datetime.now().isoformat() + + filename = "best_params.json" if is_best else f"params_gen_{self.generation}.json" + filepath = os.path.join(self.storage_path, filename) + + with open(filepath, 'w') as f: + json.dump(data, f, indent=2) + + if is_best: + checkpoint_file = os.path.join( + self.storage_path, "checkpoints", + 
f"best_gen_{self.generation}_fit_{fitness:.1f}.json" + ) + with open(checkpoint_file, 'w') as f: + json.dump(data, f, indent=2) + + def evaluate_params(self, params: TradingParameters) -> Tuple[float, Dict[str, Any]]: + """ + Evaluate a parameter set by running multiple backtests. + """ + total_profit = 0.0 + total_trades = 0 + wins = 0 + losses = 0 + tp1_hits = 0 + tp2_hits = 0 + tp3_hits = 0 + sl_hits = 0 + results = [] + + # Generate test configurations + test_configs = [] + for _ in range(self.tests_per_evaluation): + symbol = random.choice(SYMBOLS) + interval = random.choice(INTERVALS) + candles = random.randint(300, 600) + test_configs.append((symbol, interval, candles)) + + for symbol, interval, candles in test_configs: + try: + window = int(candles * 0.5) + + # Run backtest with these parameters + # Note: We need to pass custom TP/SL params through the signal generation + # For now, we use the weights and trailing stop params that strategy.py accepts + final_balance, trades, _ = backtest_strategy( + symbol=symbol, + interval=interval, + candles=candles, + window=window, + initial_balance=self.initial_balance, + risk_percentage=params.risk_percentage, + weights=params.weights, + use_trailing_stop=True, + trailing_stop_distance_percent=params.trailing_stop_distance, + ) + + profit = final_balance - self.initial_balance + + # Analyze trades + for trade in trades: + trade_type = trade.get('type', '') + if trade_type == 'entry': + total_trades += 1 + elif trade_type == 'take_profit_1': + tp1_hits += 1 + if trade.get('profit', 0) > 0: + wins += 1 + elif trade_type == 'take_profit_2': + tp2_hits += 1 + if trade.get('profit', 0) > 0: + wins += 1 + elif trade_type == 'take_profit_3': + tp3_hits += 1 + if trade.get('profit', 0) > 0: + wins += 1 + elif trade_type == 'stop_loss': + sl_hits += 1 + losses += 1 + elif trade_type == 'exit_end_of_period': + if trade.get('profit', 0) > 0: + wins += 1 + else: + losses += 1 + + total_profit += profit + self.total_backtests += 
1 + + results.append({ + 'symbol': symbol, + 'interval': interval, + 'profit': profit, + 'trades': len([t for t in trades if t.get('type') == 'entry']) + }) + + except Exception as e: + total_profit -= 50 # Penalty for errors + results.append({'error': str(e)}) + + self.total_trades += total_trades + + # Calculate fitness + if total_trades == 0: + return -1000, {'error': 'No trades'} + + total_exits = wins + losses + win_rate = wins / total_exits if total_exits > 0 else 0 + avg_profit = total_profit / self.tests_per_evaluation + + # TP distribution score (prefer hitting higher TPs) + total_tp_hits = tp1_hits + tp2_hits + tp3_hits + if total_tp_hits > 0: + tp_quality = (tp1_hits * 1 + tp2_hits * 2 + tp3_hits * 3) / (total_tp_hits * 3) + else: + tp_quality = 0 + + # Calculate risk-adjusted return + if sl_hits > 0: + reward_risk = (tp1_hits + tp2_hits * 1.5 + tp3_hits * 2) / sl_hits + else: + reward_risk = 2.0 # Default if no SL hits + + # Composite fitness function + fitness = ( + avg_profit * 0.30 + # Profitability + win_rate * 100 * 0.25 + # Win rate (scaled to ~50) + tp_quality * 50 * 0.15 + # TP quality (scaled to ~50) + min(reward_risk, 3) * 15 * 0.15 + # Risk/reward (capped, scaled) + min(total_trades / 5, 20) * 0.15 # Trade frequency (capped) + ) + + metrics = { + 'total_profit': total_profit, + 'avg_profit': avg_profit, + 'total_trades': total_trades, + 'wins': wins, + 'losses': losses, + 'win_rate': win_rate * 100, + 'tp1_hits': tp1_hits, + 'tp2_hits': tp2_hits, + 'tp3_hits': tp3_hits, + 'sl_hits': sl_hits, + 'tp_quality': tp_quality, + 'reward_risk': reward_risk, + } + + return fitness, metrics + + def train_generation(self) -> Tuple[TradingParameters, float]: + """Run one generation of evolutionary optimization""" + self.generation += 1 + + print(f"\n{C.H}{'='*70}{C.E}") + print(f"{C.H} GENERATION {self.generation}{C.E}") + print(f"{C.H}{'='*70}{C.E}") + + # Create population + population = [] + + # Keep elite (best from previous generation) + 
population.append(self.best_params) + + # Mutations of best + for _ in range(self.population_size - 3): + mutated = self.best_params.mutate(mutation_rate=0.4, mutation_strength=0.2) + population.append(mutated) + + # One random for exploration + population.append(TradingParameters.random()) + + # One with aggressive settings + aggressive = TradingParameters( + weights=[w * random.uniform(0.8, 1.2) for w in self.best_params.weights], + atr_multiplier=1.5, # Tighter SL + tp1_ratio=1.2, # Quick TP1 + tp2_ratio=2.0, + tp3_ratio=3.5, + trailing_stop_distance=0.4, + trailing_activation_tp=1, + risk_percentage=1.5, + ) + population.append(aggressive) + + # Evaluate all + results = [] + for i, params in enumerate(population): + label = "ELITE" if i == 0 else f"#{i+1}" + print(f"\n Testing {label}... ", end="", flush=True) + + fitness, metrics = self.evaluate_params(params) + results.append((fitness, params, metrics)) + + color = C.G if fitness > self.best_fitness else (C.Y if fitness > self.best_fitness - 20 else C.R) + print(f"{color}Fitness: {fitness:.1f} | " + f"Profit: ${metrics.get('avg_profit', 0):.2f} | " + f"WR: {metrics.get('win_rate', 0):.1f}% | " + f"TP1:{metrics.get('tp1_hits',0)} TP2:{metrics.get('tp2_hits',0)} TP3:{metrics.get('tp3_hits',0)} SL:{metrics.get('sl_hits',0)}{C.E}") + + # Sort by fitness + results.sort(key=lambda x: x[0], reverse=True) + + # Get generation best + gen_best_fitness, gen_best_params, gen_best_metrics = results[0] + + # Track history + self.fitness_history.append(gen_best_fitness) + + # Update global best + if gen_best_fitness > self.best_fitness: + improvement = gen_best_fitness - self.best_fitness + self.best_fitness = gen_best_fitness + self.best_params = copy.deepcopy(gen_best_params) + + print(f"\n{C.G}{'★'*3} NEW BEST! 
Fitness: {gen_best_fitness:.1f} (+{improvement:.1f}) {'★'*3}{C.E}") + + # Show key parameters + print(f"\n{C.C}Key Parameters:{C.E}") + print(f" ATR Multiplier: {gen_best_params.atr_multiplier:.2f}") + print(f" TP Ratios: {gen_best_params.tp1_ratio:.1f} / {gen_best_params.tp2_ratio:.1f} / {gen_best_params.tp3_ratio:.1f}") + print(f" Trailing Stop: {gen_best_params.trailing_stop_distance:.2f}% (activate at TP{gen_best_params.trailing_activation_tp})") + print(f" Risk per Trade: {gen_best_params.risk_percentage:.2f}%") + + self._save_params(gen_best_params, gen_best_fitness, is_best=True) + else: + # Crossover top performers for next generation + if len(results) >= 2: + child = TradingParameters.crossover(results[0][1], results[1][1]) + self.best_params = child.mutate(mutation_rate=0.2) + + return gen_best_params, gen_best_fitness + + def run( + self, + max_generations: int = 100, + max_hours: float = 4.0, + target_fitness: float = None + ) -> Tuple[TradingParameters, float]: + """Run full optimization""" + self.start_time = datetime.now() + end_time = self.start_time + timedelta(hours=max_hours) + + print(f"\n{C.H}{'='*70}{C.E}") + print(f"{C.H}{'INTENSIVE PARAMETER OPTIMIZATION':^70}{C.E}") + print(f"{C.H}{'='*70}{C.E}") + + print(f"\n{C.C}Configuration:{C.E}") + print(f" Max Generations: {max_generations}") + print(f" Max Time: {max_hours} hours") + print(f" Population Size: {self.population_size}") + print(f" Tests per Evaluation: {self.tests_per_evaluation}") + print(f" Symbols: {SYMBOLS}") + print(f" Intervals: {INTERVALS}") + + print(f"\n{C.C}Optimizing:{C.E}") + print(f" • 18 Signal Weights") + print(f" • Stop Loss (ATR multiplier, min distance)") + print(f" • Take Profit Levels (TP1, TP2, TP3 R:R ratios)") + print(f" • Trailing Stop (distance, activation)") + print(f" • Risk Percentage") + + print(f"\n{C.Y}Starting optimization... 
(Ctrl+C to stop and save){C.E}") + + try: + for gen in range(max_generations): + if datetime.now() >= end_time: + print(f"\n{C.Y}Time limit reached{C.E}") + break + + best_params, best_fitness = self.train_generation() + + if target_fitness and best_fitness >= target_fitness: + print(f"\n{C.G}Target fitness reached!{C.E}") + break + + # Progress every 5 gens + if self.generation % 5 == 0: + elapsed = (datetime.now() - self.start_time).total_seconds() / 60 + print(f"\n{C.B}═══ Progress: Gen {self.generation} | " + f"Best: {self.best_fitness:.1f} | " + f"Backtests: {self.total_backtests} | " + f"Time: {elapsed:.1f}m ═══{C.E}") + + except KeyboardInterrupt: + print(f"\n{C.Y}Optimization interrupted{C.E}") + + self._print_final_report() + return self.best_params, self.best_fitness + + def _print_final_report(self): + """Print comprehensive final report""" + elapsed = (datetime.now() - self.start_time).total_seconds() + + print(f"\n{C.H}{'='*70}{C.E}") + print(f"{C.H}{'OPTIMIZATION COMPLETE':^70}{C.E}") + print(f"{C.H}{'='*70}{C.E}") + + print(f"\n{C.BOLD}Statistics:{C.E}") + print(f" Generations: {self.generation}") + print(f" Backtests: {self.total_backtests}") + print(f" Trades: {self.total_trades}") + print(f" Time: {elapsed/60:.1f} minutes") + print(f" Best Fitness: {self.best_fitness:.1f}") + + p = self.best_params + + print(f"\n{C.BOLD}═══ OPTIMIZED PARAMETERS ═══{C.E}") + + print(f"\n{C.C}Stop Loss:{C.E}") + print(f" ATR Multiplier: {p.atr_multiplier:.2f}") + print(f" Min SL Distance: {p.min_sl_percent:.2f}%") + + print(f"\n{C.C}Take Profit Levels:{C.E}") + print(f" TP1 R:R Ratio: {p.tp1_ratio:.2f}") + print(f" TP2 R:R Ratio: {p.tp2_ratio:.2f}") + print(f" TP3 R:R Ratio: {p.tp3_ratio:.2f}") + + print(f"\n{C.C}Trailing Stop:{C.E}") + print(f" Distance: {p.trailing_stop_distance:.2f}%") + print(f" Activation: TP{p.trailing_activation_tp}") + + print(f"\n{C.C}Position Sizing:{C.E}") + print(f" Risk per Trade: {p.risk_percentage:.2f}%") + + print(f"\n{C.C}Signal 
def main():
    """CLI entry point: parse arguments and launch the optimizer."""
    parser = argparse.ArgumentParser(description="Intensive trading parameter optimization")
    parser.add_argument('--generations', type=int, default=50)
    parser.add_argument('--hours', type=float, default=2.0)
    parser.add_argument('--population', type=int, default=8)
    parser.add_argument('--tests', type=int, default=10, help='Backtests per evaluation')
    parser.add_argument('--balance', type=float, default=10000.0)
    parser.add_argument('--storage', type=str, default='./data/intensive_training')
    parser.add_argument('--target-fitness', type=float, default=None)
    args = parser.parse_args()

    optimizer = IntensiveParameterOptimizer(
        storage_path=args.storage,
        initial_balance=args.balance,
        population_size=args.population,
        tests_per_evaluation=args.tests,
    )

    try:
        _, best_fitness = optimizer.run(
            max_generations=args.generations,
            max_hours=args.hours,
            target_fitness=args.target_fitness,
        )
        print(f"\n{C.G}Optimization completed! Best fitness: {best_fitness:.1f}{C.E}")
    except Exception as exc:
        print(f"\n{C.R}Optimization failed: {exc}{C.E}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
class Colors:
    """ANSI escape sequences for coloured terminal output."""
    H = '\033[95m'    # header (bright magenta)
    B = '\033[94m'    # bright blue
    C = '\033[96m'    # bright cyan
    G = '\033[92m'    # bright green — good / profit
    Y = '\033[93m'    # bright yellow — warning
    R = '\033[91m'    # bright red — bad / loss
    E = '\033[0m'     # reset ("end colour")
    BOLD = '\033[1m'  # bold text
value) + if isinstance(weights_data, dict): + if len(weights_data) == len(WEIGHT_NAMES): + weights = [float(weights_data.get(name, 1.0)) for name in WEIGHT_NAMES] + print(f"{Colors.G}✓ Loaded weights from previous session{Colors.E}") + return weights + # Handle list format + elif isinstance(weights_data, list) and len(weights_data) == len(WEIGHT_NAMES): + weights = [float(w) for w in weights_data] + print(f"{Colors.G}✓ Loaded weights from previous session{Colors.E}") + return weights + except Exception as e: + print(f"{Colors.Y}Warning: Could not load weights: {e}{Colors.E}") + + # Smart starting weights (not all 1.0) + return [ + 1.3, # W_BULLISH_OB - Order blocks tend to work well + 1.3, # W_BEARISH_OB + 1.1, # W_BULLISH_BREAKER + 1.1, # W_BEARISH_BREAKER + 0.8, # W_ABOVE_SUPPORT + 0.8, # W_BELOW_RESISTANCE + 0.6, # W_FVG_ABOVE - FVGs less reliable alone + 0.6, # W_FVG_BELOW + 1.0, # W_TREND + 1.4, # W_SWEEP_HIGHS - Sweeps are key signals + 1.4, # W_SWEEP_LOWS + 1.5, # W_STRUCTURE_BREAK - Very important + 0.7, # W_PIN_BAR + 0.7, # W_ENGULFING + 1.0, # W_LIQUIDITY_POOL_ABOVE + 1.0, # W_LIQUIDITY_POOL_BELOW + 1.2, # W_LIQUIDITY_POOL_ROUND + 0.5, # W_RSI_EXTREME - Often unreliable + ] + + def _save_weights(self): + """Save current weights""" + data = { + 'weights': self.weights, + 'weight_names': WEIGHT_NAMES, + 'best_weights': self.best_weights, + 'best_profit': self.best_profit, + 'iteration': self.iteration, + 'total_trades': self.total_trades, + 'total_profit': self.total_profit, + 'timestamp': datetime.now().isoformat(), + 'indicator_stats': dict(self.indicator_stats), + } + + with open(os.path.join(self.storage_path, "final_weights.json"), 'w') as f: + json.dump(data, f, indent=2) + + def run_backtest(self, symbol: str, interval: str, candles: int) -> Dict: + """Run a single backtest and return results""" + window = int(candles * 0.5) + + try: + final_balance, trades, _ = backtest_strategy( + symbol=symbol, + interval=interval, + candles=candles, + 
window=window, + initial_balance=self.initial_balance, + risk_percentage=1.0, + weights=self.weights, + use_trailing_stop=True, + trailing_stop_distance_percent=0.5, + ) + + profit = final_balance - self.initial_balance + entry_trades = [t for t in trades if t.get('type') == 'entry'] + + # Analyze exits + wins = 0 + losses = 0 + for t in trades: + if t.get('type') in ['take_profit_1', 'take_profit_2', 'take_profit_3']: + wins += 1 + elif t.get('type') == 'stop_loss': + losses += 1 + elif t.get('type') == 'exit_end_of_period': + if t.get('profit', 0) > 0: + wins += 1 + else: + losses += 1 + + return { + 'success': True, + 'profit': profit, + 'trades': len(entry_trades), + 'wins': wins, + 'losses': losses, + 'win_rate': (wins / (wins + losses) * 100) if (wins + losses) > 0 else 0, + } + + except Exception as e: + return {'success': False, 'error': str(e), 'profit': 0, 'trades': 0} + + def update_weights(self, iteration_profit: float, iteration_trades: int, win_rate: float): + """ + Update weights based on iteration performance. + Uses multiple strategies for learning. + """ + if iteration_trades == 0: + return + + # Strategy 1: Random perturbation with momentum + # If profit was good, keep direction. If bad, try opposite. 
+ for i in range(len(self.weights)): + # Add random exploration + noise = np.random.normal(0, self.learning_rate * 0.5) + + # Bias based on performance + if iteration_profit > 0: + # Profitable - small random changes + self.weights[i] += noise * 0.5 + else: + # Losing - try bigger changes + self.weights[i] += noise * 1.5 + + # Keep within bounds + self.weights[i] = max(0.2, min(2.5, self.weights[i])) + + # Strategy 2: Win rate based adjustment + # If win rate is low, reduce all weights slightly (be more selective) + # If win rate is high, can afford to increase weights + if win_rate < 40: + for i in range(len(self.weights)): + self.weights[i] *= 0.95 # Reduce by 5% + elif win_rate > 60: + for i in range(len(self.weights)): + self.weights[i] *= 1.02 # Increase by 2% + + # Re-clamp + for i in range(len(self.weights)): + self.weights[i] = max(0.2, min(2.5, self.weights[i])) + + def run_iteration(self) -> Dict: + """Run one complete iteration over all symbols and intervals""" + self.iteration += 1 + + print(f"\n{Colors.H}{'='*70}{Colors.E}") + print(f"{Colors.H} ITERATION {self.iteration}{Colors.E}") + print(f"{Colors.H}{'='*70}{Colors.E}") + + iteration_profit = 0.0 + iteration_trades = 0 + iteration_wins = 0 + iteration_losses = 0 + + for symbol in SYMBOLS: + print(f"\n{Colors.C}--- {symbol} ---{Colors.E}") + + for interval in INTERVALS: + candles = CANDLES_PER_INTERVAL.get(interval, 400) + print(f" {interval:>3}: ", end="", flush=True) + + result = self.run_backtest(symbol, interval, candles) + + if result['success']: + profit = result['profit'] + trades = result['trades'] + wr = result['win_rate'] + + iteration_profit += profit + iteration_trades += trades + iteration_wins += result['wins'] + iteration_losses += result['losses'] + + color = Colors.G if profit > 0 else Colors.R + print(f"{color}${profit:+8.2f}{Colors.E} | {trades:2d} trades | {wr:5.1f}% WR") + else: + print(f"{Colors.R}FAILED: {result.get('error', 'Unknown')[:30]}{Colors.E}") + + # Update totals + 
self.total_trades += iteration_trades + self.total_profit += iteration_profit + + # Calculate iteration win rate + iter_wr = (iteration_wins / (iteration_wins + iteration_losses) * 100 + if (iteration_wins + iteration_losses) > 0 else 0) + + # Print summary + print(f"\n{Colors.BOLD}Iteration {self.iteration} Summary:{Colors.E}") + color = Colors.G if iteration_profit > 0 else Colors.R + print(f" Profit: {color}${iteration_profit:+.2f}{Colors.E}") + print(f" Trades: {iteration_trades}") + print(f" Win Rate: {iter_wr:.1f}%") + + # Track best + if iteration_profit > self.best_profit: + self.best_profit = iteration_profit + self.best_weights = self.weights.copy() + print(f" {Colors.G}★ NEW BEST!{Colors.E}") + + # Update weights AGGRESSIVELY based on performance + self.update_weights(iteration_profit, iteration_trades, iter_wr) + + # Apply learning rate decay (prevents overfitting in later iterations) + self.learning_rate = self.base_learning_rate * (self.lr_decay ** self.iteration) + + # Show weight changes + print(f"\n{Colors.Y}Weight adjustments:{Colors.E}") + changed = 0 + for i, (name, weight) in enumerate(zip(WEIGHT_NAMES, self.weights)): + if i < len(self.weight_history) and len(self.weight_history) > 0: + old = self.weight_history[-1][i] if self.weight_history else 1.0 + diff = weight - old + if abs(diff) > 0.01: + direction = "↑" if diff > 0 else "↓" + print(f" {name}: {old:.3f} → {weight:.3f} ({direction}{abs(diff):.3f})") + changed += 1 + + if changed == 0: + print(" (First iteration - weights established)") + + self.weight_history.append(self.weights.copy()) + self.profit_history.append(iteration_profit) + + # Save after each iteration + self._save_weights() + + return { + 'iteration': self.iteration, + 'profit': iteration_profit, + 'trades': iteration_trades, + 'win_rate': iter_wr, + } + + def run(self, iterations: int = 20) -> Dict: + """Run multiple iterations of learning""" + print(f"\n{Colors.H}{'='*70}{Colors.E}") + print(f"{Colors.H}{'AGGRESSIVE 
SELF-LEARNING BACKTEST':^70}{Colors.E}") + print(f"{Colors.H}{'='*70}{Colors.E}") + + print(f"\n{Colors.C}Configuration:{Colors.E}") + print(f" Iterations: {iterations}") + print(f" Symbols: {SYMBOLS}") + print(f" Intervals: {INTERVALS}") + print(f" Learning Rate: {self.learning_rate} (decay: {self.lr_decay}/iter)") + print(f" Initial Balance: ${self.initial_balance:,.2f}") + + # Show candle counts per interval + total_candles = sum(CANDLES_PER_INTERVAL.values()) * len(SYMBOLS) + print(f"\n{Colors.C}Data per iteration:{Colors.E}") + for interval, candles in CANDLES_PER_INTERVAL.items(): + print(f" {interval:>3}: {candles:,} candles × {len(SYMBOLS)} symbols") + print(f" Total: ~{total_candles:,} candles/iteration (expect 200-400+ trades)") + + print(f"\n{Colors.C}Starting weights:{Colors.E}") + for name, weight in zip(WEIGHT_NAMES, self.weights): + print(f" {name}: {weight:.3f}") + + start_time = datetime.now() + + try: + for _ in range(iterations): + self.run_iteration() + except KeyboardInterrupt: + print(f"\n{Colors.Y}Interrupted - saving progress...{Colors.E}") + + # Final report + self._print_final_report(start_time) + + return { + 'total_iterations': self.iteration, + 'total_trades': self.total_trades, + 'total_profit': self.total_profit, + 'best_profit': self.best_profit, + 'final_weights': self.weights, + 'best_weights': self.best_weights, + } + + def _print_final_report(self, start_time: datetime): + """Print final report""" + elapsed = (datetime.now() - start_time).total_seconds() + + print(f"\n{Colors.H}{'='*70}{Colors.E}") + print(f"{Colors.H}{'FINAL REPORT':^70}{Colors.E}") + print(f"{Colors.H}{'='*70}{Colors.E}") + + print(f"\n{Colors.BOLD}Overall Statistics:{Colors.E}") + print(f" Duration: {elapsed/60:.1f} minutes") + print(f" Iterations: {self.iteration}") + print(f" Total Trades: {self.total_trades}") + + color = Colors.G if self.total_profit > 0 else Colors.R + print(f" Total Profit: {color}${self.total_profit:+.2f}{Colors.E}") + print(f" Best 
def main():
    """CLI entry point for the aggressive self-learning backtest."""
    parser = argparse.ArgumentParser(description="Aggressive self-learning backtest")
    parser.add_argument('--iterations', type=int, default=20,
                        help='Number of learning iterations (default: 20)')
    parser.add_argument('--balance', type=float, default=10000.0)
    parser.add_argument('--learning-rate', type=float, default=0.15,
                        help='How aggressively to change weights (default: 0.15)')
    parser.add_argument('--storage', type=str, default='./data/btc_eth_learning')
    parser.add_argument('--report-only', action='store_true')
    args = parser.parse_args()

    # Report mode: dump the saved learning state and exit without running.
    if args.report_only:
        weights_file = os.path.join(args.storage, "final_weights.json")
        if not os.path.exists(weights_file):
            print("No saved weights found")
            return
        with open(weights_file) as fh:
            print(json.dumps(json.load(fh), indent=2))
        return

    learner = AggressiveLearner(
        storage_path=args.storage,
        initial_balance=args.balance,
        learning_rate=args.learning_rate,
    )

    try:
        result = learner.run(iterations=args.iterations)

        print(f"\n{Colors.H}{'='*70}{Colors.E}")
        print(f"{Colors.H}{'BACKTEST COMPLETE':^70}{Colors.E}")
        print(f"\n{Colors.H}{'='*70}{Colors.E}".lstrip("\n"))

        print(f"\n{Colors.G}✓ Completed {result['total_iterations']} iterations{Colors.E}")
        color = Colors.G if result['total_profit'] > 0 else Colors.R
        print(f"{Colors.G}✓ Total profit: {color}${result['total_profit']:+.2f}{Colors.E}")
        print(f"{Colors.B}ℹ Learning data saved to: {args.storage}{Colors.E}")
    except Exception as exc:
        print(f"\n{Colors.R}Error: {exc}{Colors.E}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
# Print CLI usage and the option summary for this launcher.
show_help() {
    cat <<EOF
Usage: $0 [options]

Intensive parameter optimization for trading strategy

Options:
  --hours N         Maximum training time in hours (default: 4)
  --generations N   Maximum generations (default: 100)
  --population N    Population size per generation (default: 8)
  --tests N         Backtests per evaluation (default: 10)
  --balance N       Initial balance (default: 10000)
  --storage PATH    Storage path for results
  --no-clickhouse   Skip ClickHouse startup
  --help            Show this help

Examples:
  $0 --hours 8                      # Run for 8 hours
  $0 --generations 200 --hours 12   # Long training run

EOF
}
+} + +start_clickhouse() { + echo -e "${YELLOW}Starting ClickHouse...${NC}" + + if is_clickhouse_running; then + echo -e "${GREEN}✓ ClickHouse already running${NC}" + return 0 + fi + + cd "${CLICKHOUSE_DIR}" 2>/dev/null || mkdir -p "${CLICKHOUSE_DIR}" + + if ! command -v clickhouse &> /dev/null; then + echo -e "${RED}✗ ClickHouse not found. Install: brew install clickhouse${NC}" + exit 1 + fi + + if [ -f "config.xml" ]; then + clickhouse server --config-file=config.xml &> "${SCRIPT_DIR}/logs/clickhouse_${TIMESTAMP}.log" & + else + clickhouse server &> "${SCRIPT_DIR}/logs/clickhouse_${TIMESTAMP}.log" & + fi + + CLICKHOUSE_PID=$! + + for i in {1..15}; do + if is_clickhouse_running; then + echo -e "${GREEN}✓ ClickHouse started${NC}" + return 0 + fi + sleep 1 + done + + echo -e "${RED}✗ ClickHouse failed to start${NC}" + return 1 +} + +cleanup() { + if [ "$SKIP_CLICKHOUSE" = false ] && [ -n "$CLICKHOUSE_PID" ]; then + echo -e "${YELLOW}Stopping ClickHouse...${NC}" + kill $CLICKHOUSE_PID 2>/dev/null || true + fi +} + +trap cleanup EXIT + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --hours) + HOURS="$2" + shift 2 + ;; + --generations) + GENERATIONS="$2" + shift 2 + ;; + --population) + POPULATION="$2" + shift 2 + ;; + --tests) + TESTS="$2" + shift 2 + ;; + --balance) + BALANCE="$2" + shift 2 + ;; + --storage) + STORAGE_PATH="$2" + shift 2 + ;; + --no-clickhouse) + SKIP_CLICKHOUSE=true + shift + ;; + --help) + show_help + exit 0 + ;; + *) + echo -e "${RED}Unknown option: $1${NC}" + show_help + exit 1 + ;; + esac +done + +main() { + print_banner + + mkdir -p "${SCRIPT_DIR}/logs" + mkdir -p "${STORAGE_PATH}" + + echo -e "${CYAN}Configuration:${NC}" + echo " Max Generations: ${GENERATIONS}" + echo " Max Time: ${HOURS} hours" + echo " Population Size: ${POPULATION}" + echo " Tests per Eval: ${TESTS}" + echo " Initial Balance: \$${BALANCE}" + echo " Storage: ${STORAGE_PATH}" + echo " Log: ${LOG_FILE}" + echo "" + + # Estimate time + 
ESTIMATED_BACKTESTS=$((GENERATIONS * POPULATION * TESTS)) + echo -e "${YELLOW}Estimated backtests: ~${ESTIMATED_BACKTESTS}${NC}" + echo -e "${YELLOW}This may take several hours. Progress will be saved automatically.${NC}" + echo "" + + if [ "$SKIP_CLICKHOUSE" = false ]; then + start_clickhouse || exit 1 + sleep 2 + fi + + export PYTHONPATH="${PROJECT_DIR}:${PYTHONPATH}" + + # Find Python + PYTHON_CMD="" + for cmd in python3.11 python3 python; do + if command -v $cmd &> /dev/null; then + PYTHON_CMD="$cmd" + break + fi + done + + if [ -z "$PYTHON_CMD" ]; then + echo -e "${RED}✗ Python not found${NC}" + exit 1 + fi + + echo -e "${GREEN}Using: $(${PYTHON_CMD} --version)${NC}" + echo "" + + CMD="${PYTHON_CMD} ${SCRIPT_DIR}/intensive_training.py" + CMD="${CMD} --generations ${GENERATIONS}" + CMD="${CMD} --hours ${HOURS}" + CMD="${CMD} --population ${POPULATION}" + CMD="${CMD} --tests ${TESTS}" + CMD="${CMD} --balance ${BALANCE}" + CMD="${CMD} --storage ${STORAGE_PATH}" + + echo -e "${CYAN}Command:${NC} ${CMD}" + echo "" + + echo -e "${YELLOW}Starting optimization... 
(Ctrl+C to stop safely)${NC}" + echo "" + + ${CMD} 2>&1 | tee "${LOG_FILE}" + EXIT_CODE=${PIPESTATUS[0]} + + echo "" + if [ $EXIT_CODE -eq 0 ]; then + echo -e "${GREEN}╔══════════════════════════════════════════════════════════════════╗${NC}" + echo -e "${GREEN}║ ✓ OPTIMIZATION COMPLETED ║${NC}" + echo -e "${GREEN}╚══════════════════════════════════════════════════════════════════╝${NC}" + echo "" + echo -e "${CYAN}Results:${NC}" + echo " Parameters: ${STORAGE_PATH}/best_params.json" + echo " Log: ${LOG_FILE}" + + # Show best params if file exists + if [ -f "${STORAGE_PATH}/best_params.json" ]; then + echo "" + echo -e "${CYAN}Best Parameters Summary:${NC}" + ${PYTHON_CMD} -c " +import json +with open('${STORAGE_PATH}/best_params.json') as f: + p = json.load(f) +print(f\" Fitness: {p.get('fitness', 'N/A'):.1f}\") +print(f\" TP Ratios: {p.get('tp1_ratio', 1.5):.1f} / {p.get('tp2_ratio', 2.5):.1f} / {p.get('tp3_ratio', 4.0):.1f}\") +print(f\" ATR Mult: {p.get('atr_multiplier', 2.0):.2f}\") +print(f\" Trailing: {p.get('trailing_stop_distance', 0.5):.2f}% at TP{p.get('trailing_activation_tp', 1)}\") +print(f\" Risk: {p.get('risk_percentage', 1.0):.2f}%\") +" 2>/dev/null || true + fi + else + echo -e "${RED}╔══════════════════════════════════════════════════════════════════╗${NC}" + echo -e "${RED}║ ✗ OPTIMIZATION FAILED (code: ${EXIT_CODE}) ║${NC}" + echo -e "${RED}╚══════════════════════════════════════════════════════════════════╝${NC}" + echo "Check log: ${LOG_FILE}" + fi + + return $EXIT_CODE +} + +main + + + + diff --git a/back_tester/start_learning_backtest.sh b/back_tester/start_learning_backtest.sh new file mode 100755 index 0000000..badd417 --- /dev/null +++ b/back_tester/start_learning_backtest.sh @@ -0,0 +1,283 @@ +#!/bin/bash +# +# Self-Learning Backtesting Launcher for BTC and ETH +# +# This script launches comprehensive backtesting with self-learning +# on BTCUSDT and ETHUSDT across all time intervals. 
+#
+# Usage:
+#   ./start_learning_backtest.sh [options]
+#
+# Options:
+#   --iterations N      Number of learning iterations (default: 20)
+#   --learning-rate N   How fast weights change (default: 0.15)
+#   --balance N         Initial balance (default: 10000)
+#   --storage PATH      Path to store learning data
+#   --no-clickhouse     Skip ClickHouse startup (if already running)
+#   --help              Show this help message
+#
+
+set -e
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
+CLICKHOUSE_DIR="${SCRIPT_DIR}/clickhouse"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+MAGENTA='\033[0;35m'
+BOLD='\033[1m'
+NC='\033[0m'
+
+# Default parameters - AGGRESSIVE LEARNING (20 iterations, not 3!)
+ITERATIONS=20
+LEARNING_RATE=0.15
+BALANCE=10000
+SKIP_CLICKHOUSE=false
+STORAGE_PATH="${SCRIPT_DIR}/data/btc_eth_learning"
+
+# Timestamp for logs
+TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
+LOG_FILE="${SCRIPT_DIR}/logs/learning_backtest_${TIMESTAMP}.log"
+
+# Banner
+print_banner() {
+    echo -e "${MAGENTA}"
+    echo "╔══════════════════════════════════════════════════════════════════╗"
+    echo "║                                                                  ║"
+    echo "║          🤖  SELF-LEARNING BACKTESTING SYSTEM                    ║"
+    echo "║                                                                  ║"
+    echo "║     Symbols: BTCUSDT, ETHUSDT                                    ║"
+    echo "║     Intervals: 1m, 5m, 15m, 30m, 1h, 4h, 1d                      ║"
+    echo "║                                                                  ║"
+    echo "╚══════════════════════════════════════════════════════════════════╝"
+    echo -e "${NC}"
+}
+
+# Help message
+show_help() {
+    echo "Usage: $0 [options]"
+    echo ""
+    echo "Self-Learning Backtesting for BTCUSDT and ETHUSDT"
+    echo ""
+    echo "Options:"
+    echo "  --iterations N      Number of learning iterations (default: 20)"
+    echo "  --learning-rate N   How fast weights change (default: 0.15)"
+    echo "  --balance N         Initial balance in USD (default: 10000)"
+    echo "  --storage PATH      Path to store learning data"
+    echo "  --no-clickhouse     Skip ClickHouse startup"
+    echo "  --help              Show this help message"
+    echo ""
+    echo "Examples:"
+    echo " 
$0 # Run 20 iterations (default)" + echo " $0 --iterations 50 # Run 50 learning iterations" + echo " $0 --learning-rate 0.25 # More aggressive learning" + echo "" +} + +# Check if ClickHouse is running +is_clickhouse_running() { + nc -z localhost 9000 &> /dev/null + return $? +} + +# Start ClickHouse +start_clickhouse() { + echo -e "${YELLOW}Starting ClickHouse server...${NC}" + + if is_clickhouse_running; then + echo -e "${GREEN}✓ ClickHouse is already running${NC}" + return 0 + fi + + if [ ! -d "${CLICKHOUSE_DIR}" ]; then + echo -e "${RED}✗ ClickHouse directory not found: ${CLICKHOUSE_DIR}${NC}" + echo -e "${YELLOW}Creating minimal ClickHouse config...${NC}" + mkdir -p "${CLICKHOUSE_DIR}" + fi + + cd "${CLICKHOUSE_DIR}" + + if ! command -v clickhouse &> /dev/null; then + echo -e "${RED}✗ ClickHouse not found in PATH${NC}" + echo "Please install ClickHouse: brew install clickhouse" + exit 1 + fi + + # Start ClickHouse in background + if [ -f "config.xml" ]; then + clickhouse server --config-file=config.xml &> "${SCRIPT_DIR}/logs/clickhouse_${TIMESTAMP}.log" & + else + clickhouse server &> "${SCRIPT_DIR}/logs/clickhouse_${TIMESTAMP}.log" & + fi + + CLICKHOUSE_PID=$! + + echo -e "${YELLOW}Waiting for ClickHouse to start (PID: ${CLICKHOUSE_PID})...${NC}" + + for i in {1..15}; do + if is_clickhouse_running; then + echo -e "${GREEN}✓ ClickHouse started successfully${NC}" + return 0 + fi + sleep 1 + echo -n "." 
+ done + + echo "" + echo -e "${RED}✗ Failed to start ClickHouse within timeout${NC}" + return 1 +} + +# Stop ClickHouse on exit +cleanup() { + if [ "$SKIP_CLICKHOUSE" = false ] && [ -n "$CLICKHOUSE_PID" ]; then + echo -e "${YELLOW}Stopping ClickHouse...${NC}" + kill $CLICKHOUSE_PID 2>/dev/null || true + fi +} + +trap cleanup EXIT + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --iterations) + ITERATIONS="$2" + shift 2 + ;; + --learning-rate) + LEARNING_RATE="$2" + shift 2 + ;; + --balance) + BALANCE="$2" + shift 2 + ;; + --storage) + STORAGE_PATH="$2" + shift 2 + ;; + --no-clickhouse) + SKIP_CLICKHOUSE=true + shift + ;; + --help) + show_help + exit 0 + ;; + *) + echo -e "${RED}Unknown option: $1${NC}" + show_help + exit 1 + ;; + esac +done + +# Main execution +main() { + print_banner + + # Create logs directory + mkdir -p "${SCRIPT_DIR}/logs" + mkdir -p "${STORAGE_PATH}" + + echo -e "${CYAN}Configuration:${NC}" + echo " Iterations: ${ITERATIONS}" + echo " Learning Rate: ${LEARNING_RATE}" + echo " Balance: \$${BALANCE}" + echo " Storage: ${STORAGE_PATH}" + echo " Log File: ${LOG_FILE}" + echo "" + + # Estimate time + ESTIMATED_TIME=$((ITERATIONS * 2)) + echo -e "${YELLOW}Estimated time: ~${ESTIMATED_TIME} minutes${NC}" + echo "" + + # Start ClickHouse if needed + if [ "$SKIP_CLICKHOUSE" = false ]; then + start_clickhouse + if [ $? -ne 0 ]; then + echo -e "${RED}Failed to start ClickHouse. Exiting.${NC}" + exit 1 + fi + echo "" + sleep 2 + else + echo -e "${YELLOW}Skipping ClickHouse startup (--no-clickhouse)${NC}" + if ! 
is_clickhouse_running; then + echo -e "${RED}Warning: ClickHouse doesn't appear to be running!${NC}" + fi + echo "" + fi + + # Set Python path + export PYTHONPATH="${PROJECT_DIR}:${PYTHONPATH}" + + # Check Python + PYTHON_CMD="" + if command -v python3.11 &> /dev/null; then + PYTHON_CMD="python3.11" + elif command -v python3 &> /dev/null; then + PYTHON_CMD="python3" + elif command -v python &> /dev/null; then + PYTHON_CMD="python" + else + echo -e "${RED}✗ Python not found${NC}" + exit 1 + fi + + echo -e "${GREEN}Using Python: $(${PYTHON_CMD} --version)${NC}" + echo "" + + # Build command + CMD="${PYTHON_CMD} ${SCRIPT_DIR}/run_learning_backtest.py" + CMD="${CMD} --iterations ${ITERATIONS}" + CMD="${CMD} --learning-rate ${LEARNING_RATE}" + CMD="${CMD} --balance ${BALANCE}" + CMD="${CMD} --storage ${STORAGE_PATH}" + + echo -e "${CYAN}Executing:${NC}" + echo " ${CMD}" + echo "" + + # Run backtest + echo -e "${YELLOW}Starting backtests... (logging to ${LOG_FILE})${NC}" + echo "" + + # Run and tee to both console and log file + ${CMD} 2>&1 | tee "${LOG_FILE}" + + EXIT_CODE=${PIPESTATUS[0]} + + echo "" + if [ $EXIT_CODE -eq 0 ]; then + echo -e "${GREEN}╔══════════════════════════════════════════════════════════════════╗${NC}" + echo -e "${GREEN}║ ✓ BACKTESTING COMPLETED SUCCESSFULLY ║${NC}" + echo -e "${GREEN}╚══════════════════════════════════════════════════════════════════╝${NC}" + echo "" + echo -e "${CYAN}Results saved to:${NC}" + echo " Learning Data: ${STORAGE_PATH}" + echo " Weights: ${STORAGE_PATH}/final_weights.json" + echo " Report: ${STORAGE_PATH}/backtest_report.json" + echo " Log: ${LOG_FILE}" + else + echo -e "${RED}╔══════════════════════════════════════════════════════════════════╗${NC}" + echo -e "${RED}║ ✗ BACKTESTING FAILED (exit code: ${EXIT_CODE}) ║${NC}" + echo -e "${RED}╚══════════════════════════════════════════════════════════════════╝${NC}" + echo "" + echo "Check log file for details: ${LOG_FILE}" + fi + + return $EXIT_CODE +} + +# Run main 
+main + diff --git a/back_tester/strategy.py b/back_tester/strategy.py index 3f56272..f829810 100644 --- a/back_tester/strategy.py +++ b/back_tester/strategy.py @@ -1,9 +1,10 @@ import os import sys -from typing import List, Tuple, Optional, Dict +from typing import List, Tuple, Optional, Dict, Any import random from datetime import datetime, timedelta import uuid +import logging project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) sys.path.append(project_dir) @@ -26,6 +27,20 @@ from utils import create_true_preferences from .db_operations import ClickHouseDB +# Import self-learning modules (optional - graceful fallback if not available) +try: + from .adaptive_learning import ( + SelfLearningBacktester, + extract_indicator_contributions, + SignalOutcome + ) + from .enhanced_metrics import EnhancedMetricsCalculator, TradeMetrics, create_trade_metrics + LEARNING_AVAILABLE = True +except ImportError: + LEARNING_AVAILABLE = False + +logger = logging.getLogger(__name__) + def backtest_strategy( symbol: str, @@ -40,13 +55,19 @@ def backtest_strategy( trailing_stop_distance_percent: float = 0.5, # Distance to maintain from highest price iteration_id: Optional[str] = None, db: Optional[ClickHouseDB] = None, + # Self-learning parameters + enable_learning: bool = False, + learner: Optional[Any] = None, # SelfLearningBacktester instance + metrics_calculator: Optional[Any] = None, # EnhancedMetricsCalculator instance + track_indicator_contributions: bool = True, ) -> Tuple[float, list, Optional[str]]: """ - Backtest a strategy with risk management: + Backtest a strategy with risk management and optional self-learning: - At each step, generate a signal with risk management parameters - Use position sizing based on risk percentage - Implement multiple take profit levels - Use dynamic stop loss and trailing stop loss + - Optionally track signal performance for self-learning Parameters: symbol: The trading pair symbol (e.g. 
"BTCUSDT") @@ -59,6 +80,10 @@ def backtest_strategy( weights: List of weights for signal generation use_trailing_stop: Whether to enable trailing stop loss functionality trailing_stop_distance_percent: Distance in percentage to maintain from highest price reached + enable_learning: Whether to enable self-learning feedback loop + learner: SelfLearningBacktester instance for tracking signals + metrics_calculator: EnhancedMetricsCalculator for detailed metrics + track_indicator_contributions: Whether to track which indicators contributed to each signal Returns: final_balance: The simulated portfolio balance at the end @@ -97,6 +122,27 @@ def backtest_strategy( entry_signal = None current_trade = None # Store current trade details parent_trade_id = None # Track parent trade for TP/SL entries + + # Self-learning tracking variables + current_signal_id = None + current_indicator_contributions = {} + current_market_context = {} + max_favorable_excursion = 0.0 + max_adverse_excursion = 0.0 + + # Initialize learning components if enabled + if enable_learning and LEARNING_AVAILABLE and learner is None: + learner = SelfLearningBacktester( + storage_path="./data/learning", + auto_adjust_weights=True, + adjustment_frequency=50 + ) + # Use adaptive weights if available + if not weights: + weights = learner.get_current_weights() + + if enable_learning and LEARNING_AVAILABLE and metrics_calculator is None: + metrics_calculator = EnhancedMetricsCalculator() # Use all indicators enabled by default in backtesting preferences = create_true_preferences() @@ -313,6 +359,17 @@ def backtest_strategy( } db.insert_trade(trade_data) + # Record outcome for self-learning (TP3 - full target hit) + if enable_learning and LEARNING_AVAILABLE and learner and current_signal_id: + learner.record_signal_exit( + signal_id=current_signal_id, + outcome="tp3", + exit_price=current_price, + profit_loss=profit, + duration=i - entry_index + ) + current_signal_id = None + position = 0 entry_price = None 
entry_time = None @@ -397,6 +454,17 @@ def backtest_strategy( } db.insert_trade(trade_data) + # Record outcome for self-learning (stop loss) + if enable_learning and LEARNING_AVAILABLE and learner and current_signal_id: + learner.record_signal_exit( + signal_id=current_signal_id, + outcome="trailing_stop" if current_trade.get("trailing_stop_active", False) else "stop_loss", + exit_price=current_price, + profit_loss=loss, + duration=i - entry_index + ) + current_signal_id = None + position = 0 entry_price = None entry_time = None @@ -436,9 +504,11 @@ def backtest_strategy( entry_signal = signal parent_trade_id = str(uuid.uuid4()) # Generate parent trade ID - # Store trade details + # Store trade details with trailing stop initialization + trailing_activation_price = float(trading_signal.take_profit_1) # Activate at TP1 current_trade = { "stop_loss": float(trading_signal.stop_loss), + "initial_stop_loss": float(trading_signal.stop_loss), # Store initial stop for comparison "take_profit_1": float(trading_signal.take_profit_1), "take_profit_2": float(trading_signal.take_profit_2), "take_profit_3": float(trading_signal.take_profit_3), @@ -448,6 +518,11 @@ def backtest_strategy( "tp1_hit": False, "tp2_hit": False, "tp3_hit": False, + # Trailing stop fields + "highest_price": float(current_price), # Track highest price reached + "trailing_stop_active": False, # Whether trailing stop is active + "trailing_activation_price": trailing_activation_price, # Price at which trailing stop activates + "trailing_stop_level": float(trading_signal.stop_loss), # Current trailing stop level } trade_log.append( @@ -462,8 +537,63 @@ def backtest_strategy( "take_profit_1": float(trading_signal.take_profit_1), "take_profit_2": float(trading_signal.take_profit_2), "take_profit_3": float(trading_signal.take_profit_3), + "symbol": symbol, + "interval": interval, } ) + + # Track signal for self-learning + if enable_learning and LEARNING_AVAILABLE and learner: + # Parse reasons to extract 
indicator contributions + reasons_list = reason.split("\n- ") if reason else [] + reasons_list = [r.strip("- \n") for r in reasons_list if r.strip()] + + # Extract bullish/bearish scores from reason string + bullish_score = 0.0 + bearish_score = 0.0 + if "Bullish Score:" in reason: + try: + bullish_part = reason.split("Bullish Score:")[1].split("|")[0] + bullish_score = float(bullish_part.strip()) + except: + pass + if "Bearish Score:" in reason: + try: + bearish_part = reason.split("Bearish Score:")[1].split("\n")[0] + bearish_score = float(bearish_part.strip()) + except: + pass + + current_indicator_contributions = extract_indicator_contributions( + bullish_score, bearish_score, signal, reasons_list + ) + + current_market_context = { + "market_regime": trading_signal.market_conditions.get("market_regime", ""), + "volatility": trading_signal.market_conditions.get("volatility", 0), + "volume_ratio": trading_signal.market_conditions.get("volume_ratio", 1), + "rsi": trading_signal.market_conditions.get("rsi", 50), + "trend": "uptrend" if signal == "Bullish" else "downtrend" + } + + current_signal_id = str(uuid.uuid4()) + max_favorable_excursion = 0.0 + max_adverse_excursion = 0.0 + + learner.record_signal_entry( + signal_id=current_signal_id, + symbol=symbol, + interval=interval, + signal_type=signal, + entry_price=entry_price, + stop_loss=float(trading_signal.stop_loss), + tp1=float(trading_signal.take_profit_1), + tp2=float(trading_signal.take_profit_2), + tp3=float(trading_signal.take_profit_3), + indicator_contributions=current_indicator_contributions, + reasons=reasons_list, + market_context=current_market_context + ) # Store entry trade in database if db: @@ -551,6 +681,20 @@ def backtest_strategy( "parent_trade_id": parent_trade_id, } db.insert_trade(trade_data) + + # Record outcome for self-learning (end of period) + if enable_learning and LEARNING_AVAILABLE and learner and current_signal_id: + learner.record_signal_exit( + signal_id=current_signal_id, + 
outcome="end",
+                exit_price=final_price,
+                profit_loss=profit,
+                duration=len(df) - 1 - entry_index if entry_index else 0
+            )
+
+    # Save learning state if enabled
+    if enable_learning and LEARNING_AVAILABLE and learner:
+        learner.save_state()
 
     return balance, trade_log, sub_iteration_id
 
@@ -570,7 +714,7 @@
         use_trailing_stop=True,
         trailing_stop_distance_percent=0.5,  # Keep trailing stop 0.5% below highest price
     )
-    print(f"Final Balance: {final_balance:.2f}")
+    print(f"Final Balance: {final_balance:.2f}")
     print("Trade Log:")
-    for trade in trades:
+    for trade in trades:
         print(trade)
diff --git a/back_tester/trainer.py b/back_tester/trainer.py
index da66c47..51ed68a 100644
--- a/back_tester/trainer.py
+++ b/back_tester/trainer.py
@@ -45,22 +45,27 @@ db = ClickHouseDB()
 
 
 # --- Initial Weight Configuration ---
-# Updated for new weights including risk management signals
+# Updated for all 18 weights matching signal_detection.py
 weights = [
-    1.0,  # W_BULLISH_OB
-    1.0,  # W_BEARISH_OB
-    1.0,  # W_BULLISH_BREAKER
-    1.0,  # W_BEARISH_BREAKER
-    0.7,  # W_ABOVE_SUPPORT
-    0.7,  # W_BELOW_RESISTANCE
-    0.5,  # W_FVG_ABOVE
-    0.5,  # W_FVG_BELOW
-    0.8,  # W_TREND
-    1.2,  # W_SWEEP_HIGHS
-    1.2,  # W_SWEEP_LOWS
-    1.5,  # W_STRUCTURE_BREAK
-    0.6,  # W_PIN_BAR
-]  # Updated for new weights
+    1.0,  # W_BULLISH_OB (0)
+    1.0,  # W_BEARISH_OB (1)
+    1.0,  # W_BULLISH_BREAKER (2)
+    1.0,  # W_BEARISH_BREAKER (3)
+    0.7,  # W_ABOVE_SUPPORT (4)
+    0.7,  # W_BELOW_RESISTANCE (5)
+    0.5,  # W_FVG_ABOVE (6)
+    0.5,  # W_FVG_BELOW (7)
+    0.8,  # W_TREND (8)
+    1.2,  # W_SWEEP_HIGHS (9)
+    1.2,  # W_SWEEP_LOWS (10)
+    1.5,  # W_STRUCTURE_BREAK (11)
+    0.6,  # W_PIN_BAR (12)
+    0.6,  # W_ENGULFING (13)
+    1.2,  # W_LIQUIDITY_POOL_ABOVE (14)
+    1.2,  # W_LIQUIDITY_POOL_BELOW (15)
+    1.5,  # W_LIQUIDITY_POOL_ROUND (16)
+    0.6,  # W_RSI_EXTREME (17)
+]
 
 learning_rate = 0.05
 iterations = 1000
@@ -77,6 +82,15 @@ patience = 50
 history_size = 10
 
 
+# Weight names for tracking and logging
+WEIGHT_NAMES = [
+    "W_BULLISH_OB", 
"W_BEARISH_OB", "W_BULLISH_BREAKER", "W_BEARISH_BREAKER", + "W_ABOVE_SUPPORT", "W_BELOW_RESISTANCE", "W_FVG_ABOVE", "W_FVG_BELOW", + "W_TREND", "W_SWEEP_HIGHS", "W_SWEEP_LOWS", "W_STRUCTURE_BREAK", + "W_PIN_BAR", "W_ENGULFING", "W_LIQUIDITY_POOL_ABOVE", "W_LIQUIDITY_POOL_BELOW", + "W_LIQUIDITY_POOL_ROUND", "W_RSI_EXTREME" +] + class TrainingMetrics: def __init__(self): @@ -440,8 +454,35 @@ def optimize_weights( if __name__ == "__main__": + # Parse command line arguments + parser = argparse.ArgumentParser(description="Train signal weights for CryptoBot") + parser.add_argument( + "--symbol", type=str, default="BTCUSDT", help="Primary trading pair for optimization" + ) + parser.add_argument( + "--interval", type=str, default="1h", help="Primary candle interval" + ) + parser.add_argument( + "--iterations", type=int, default=1000, help="Number of training iterations" + ) + parser.add_argument( + "--risk", type=float, default=1.0, help="Risk percentage per trade" + ) + parser.add_argument( + "--learning-rate", type=float, default=0.05, help="Learning rate for weight updates" + ) + args = parser.parse_args() + + # Update global parameters from args + iterations = args.iterations + risk_percentage = args.risk + learning_rate = args.learning_rate + # Train the model try: + logger.info(f"Starting weight optimization for {args.symbol} {args.interval}") + logger.info(f"Iterations: {iterations}, Risk: {risk_percentage}%, Learning rate: {learning_rate}") + best_weights = optimize_weights(weights, iterations, learning_rate) logger.info(f"Training completed successfully") @@ -454,17 +495,22 @@ def optimize_weights( weights_file = f"models/weights_{timestamp}.txt" os.makedirs("models", exist_ok=True) + with open(weights_file, "w") as f: f.write( f"# Optimized weights for {args.symbol} {args.interval} at {timestamp}\n" ) f.write(f"# Fitness: {fitness:.4f}\n") - f.write(f"# Risk: {risk_percentage}\n\n") + f.write(f"# Risk: {risk_percentage}%\n") + f.write(f"# Iterations: 
{iterations}\n\n") for i, w in enumerate(best_weights): - f.write(f"W{i} = {w:.6f}\n") + name = WEIGHT_NAMES[i] if i < len(WEIGHT_NAMES) else f"W{i}" + f.write(f"{name} = {w:.6f}\n") logger.info(f"Weights saved to {weights_file}") except Exception as e: logger.error(f"Training failed: {str(e)}") + import traceback + traceback.print_exc() sys.exit(1) From 87a8f70eee10d885dc4223e98a6c6e26a3614aff Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi Date: Tue, 16 Dec 2025 00:40:51 +0100 Subject: [PATCH 2/4] update requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3685001..e5dac87 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ mplfinance==0.12.10b0 numpy==1.26.4 oauthlib==3.2.2 pandas==2.1.1 -pandas-ta==0.3.14b0 +pandas-ta==0.3.14 pytest==8.3.4 pytest-asyncio==0.25.0 pytest-mock==3.14.0 From fbdf59cc07d0e103432f668d8b29fc8d897eecfc Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi Date: Tue, 16 Dec 2025 00:41:38 +0100 Subject: [PATCH 3/4] Update requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e5dac87..7e977e2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ mplfinance==0.12.10b0 numpy==1.26.4 oauthlib==3.2.2 pandas==2.1.1 -pandas-ta==0.3.14 +pandas-ta pytest==8.3.4 pytest-asyncio==0.25.0 pytest-mock==3.14.0 From d63bc5c6c2e73e65cd453486f0fe15a03ebba172 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi Date: Tue, 16 Dec 2025 00:42:38 +0100 Subject: [PATCH 4/4] Remove pandas-ta from requirements --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7e977e2..61446be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,6 @@ mplfinance==0.12.10b0 numpy==1.26.4 oauthlib==3.2.2 pandas==2.1.1 -pandas-ta pytest==8.3.4 pytest-asyncio==0.25.0 pytest-mock==3.14.0