diff --git a/docker-compose.yml b/docker-compose.yml
index 54482b1..64c564b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -3,14 +3,14 @@ services:
build: .
container_name: condor-bot
restart: unless-stopped
+ environment:
+ TELEGRAM_TOKEN: your_bot_token
+ AUTHORIZED_USERS: chat_id_1,chat_id_2
env_file:
- .env
volumes:
# Persist bot data (user preferences, trading context, etc.)
- - ./data:/app/data
+ - ./condor_bot_data.pickle:/app/condor_bot_data.pickle
# Mount servers config
- ./servers.yml:/app/servers.yml
- environment:
- - PYTHONUNBUFFERED=1
- # Optional: enable if you need to access the host network (e.g., for local Hummingbot instances)
- # network_mode: host
+ network_mode: host
diff --git a/handlers/__init__.py b/handlers/__init__.py
index 28fd372..ea4bcfd 100644
--- a/handlers/__init__.py
+++ b/handlers/__init__.py
@@ -79,3 +79,14 @@ def clear_all_input_states(context: ContextTypes.DEFAULT_TYPE) -> None:
context.user_data.pop("editing_controller_field", None)
context.user_data.pop("deploy_params", None)
context.user_data.pop("editing_deploy_field", None)
+
+ # Bots - archived states
+ context.user_data.pop("archived_databases", None)
+ context.user_data.pop("archived_current_db", None)
+ context.user_data.pop("archived_page", None)
+ context.user_data.pop("archived_summaries", None)
+ context.user_data.pop("archived_total_count", None)
+
+ # Routines states
+ context.user_data.pop("routines_state", None)
+ context.user_data.pop("routines_editing", None)
diff --git a/handlers/bots/__init__.py b/handlers/bots/__init__.py
index 33bf833..fca3044 100644
--- a/handlers/bots/__init__.py
+++ b/handlers/bots/__init__.py
@@ -146,6 +146,17 @@
)
from ._shared import clear_bots_state, SIDE_LONG, SIDE_SHORT
+# Archived bots handlers
+from .archived import (
+ show_archived_menu,
+ show_archived_detail,
+ show_timeline_chart,
+ show_bot_chart,
+ handle_generate_report,
+ handle_archived_refresh,
+ clear_archived_state,
+)
+
logger = logging.getLogger(__name__)
@@ -625,6 +636,36 @@ async def bots_callback_handler(update: Update, context: ContextTypes.DEFAULT_TY
idx = int(action_parts[1])
await handle_refresh_controller(update, context, idx)
+ # Archived bots handlers
+ elif main_action == "archived":
+ await show_archived_menu(update, context)
+
+ elif main_action == "archived_page":
+ if len(action_parts) > 1:
+ page = int(action_parts[1])
+ await show_archived_menu(update, context, page)
+
+ elif main_action == "archived_select":
+ if len(action_parts) > 1:
+ db_index = int(action_parts[1])
+ await show_archived_detail(update, context, db_index)
+
+ elif main_action == "archived_timeline":
+ await show_timeline_chart(update, context)
+
+ elif main_action == "archived_chart":
+ if len(action_parts) > 1:
+ db_index = int(action_parts[1])
+ await show_bot_chart(update, context, db_index)
+
+ elif main_action == "archived_report":
+ if len(action_parts) > 1:
+ db_index = int(action_parts[1])
+ await handle_generate_report(update, context, db_index)
+
+ elif main_action == "archived_refresh":
+ await handle_archived_refresh(update, context)
+
else:
logger.warning(f"Unknown bots action: {action}")
await query.message.reply_text(f"Unknown action: {action}")
diff --git a/handlers/bots/_shared.py b/handlers/bots/_shared.py
index 2442349..d6d9829 100644
--- a/handlers/bots/_shared.py
+++ b/handlers/bots/_shared.py
@@ -112,6 +112,11 @@ def clear_bots_state(context) -> None:
context.user_data.pop("editing_controller_field", None)
context.user_data.pop("deploy_params", None)
context.user_data.pop("editing_deploy_field", None)
+ # Archived bots state
+ context.user_data.pop("archived_databases", None)
+ context.user_data.pop("archived_current_db", None)
+ context.user_data.pop("archived_page", None)
+ context.user_data.pop("archived_summaries", None)
def get_controller_config(context) -> Dict[str, Any]:
diff --git a/handlers/bots/archived.py b/handlers/bots/archived.py
new file mode 100644
index 0000000..db480c6
--- /dev/null
+++ b/handlers/bots/archived.py
@@ -0,0 +1,802 @@
+"""
+Archived Bots Module - View historical bot data and generate reports
+
+Provides:
+- List of archived bot databases with pagination
+- Detailed view for individual archived bots
+- Timeline chart showing all bots with PnL
+- Performance chart for individual bots
+- Full report generation (JSON + PNG) saved locally
+"""
+
+import logging
+from typing import Dict, Any, Optional, List
+
+from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup
+from telegram.error import BadRequest
+from telegram.ext import ContextTypes
+
+from utils.telegram_formatters import format_error_message, escape_markdown_v2, format_number
+from ._shared import get_bots_client, get_cached, set_cached
+
+logger = logging.getLogger(__name__)
+
+# Cache TTL for archived data (longer since it's static)
+ARCHIVED_CACHE_TTL = 300 # 5 minutes
+
+# Pagination settings
+BOTS_PER_PAGE = 5
+
+
+# ============================================
+# STATE MANAGEMENT
+# ============================================
+
+def clear_archived_state(context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Clear archived-related state from context."""
+ context.user_data.pop("archived_databases", None)
+ context.user_data.pop("archived_current_db", None)
+ context.user_data.pop("archived_page", None)
+ context.user_data.pop("archived_summaries", None)
+ context.user_data.pop("archived_total_count", None)
+
+
+def _get_db_path_by_index(context: ContextTypes.DEFAULT_TYPE, index: int) -> Optional[str]:
+ """Get db_path from stored databases list by index."""
+ databases = context.user_data.get("archived_databases", [])
+ if 0 <= index < len(databases):
+ return databases[index]
+ return None
+
+
+# ============================================
+# DATA FETCHING
+# ============================================
+
+async def fetch_archived_databases(client) -> List[str]:
+ """Fetch list of archived bot database paths."""
+ try:
+ result = await client.archived_bots.list_databases()
+ # Result is ArchivedBotListResponse with 'bots' list
+ if isinstance(result, dict):
+ return result.get("bots", [])
+ return result if isinstance(result, list) else []
+ except Exception as e:
+ logger.error(f"Error fetching archived databases: {e}", exc_info=True)
+ return []
+
+
+async def fetch_database_status(client, db_path: str) -> Optional[Dict[str, Any]]:
+ """Fetch health status for a database."""
+ try:
+ return await client.archived_bots.get_database_status(db_path)
+ except Exception as e:
+ logger.debug(f"Error fetching status for {db_path}: {e}")
+ return None
+
+
+def is_database_healthy(status: Optional[Dict[str, Any]]) -> bool:
+ """
+ Check if a database has valid trade data for analysis.
+
+ We only need trade_fill to be correct for PnL analysis,
+ even if executors/positions have errors.
+ """
+ if not status:
+ return False
+
+ # Check nested status object for trade_fill
+ nested_status = status.get("status", {})
+ if isinstance(nested_status, dict):
+ # Trade fill data is sufficient for analysis
+ if nested_status.get("trade_fill") == "Correct":
+ return True
+ # Also accept if orders are correct (backup)
+ if nested_status.get("orders") == "Correct":
+ return True
+
+ # Fallback to overall healthy flag
+ if status.get("healthy") == True:
+ return True
+
+ return False
+
+
+async def get_healthy_databases(client, databases: List[str]) -> List[str]:
+ """Filter databases to only include healthy ones."""
+ healthy = []
+ for db_path in databases:
+ status = await fetch_database_status(client, db_path)
+ if is_database_healthy(status):
+ healthy.append(db_path)
+ else:
+ logger.debug(f"Skipping unhealthy database: {db_path}")
+ return healthy
+
+
+async def fetch_database_summary(client, db_path: str) -> Optional[Dict[str, Any]]:
+ """Fetch summary for a specific archived database."""
+ try:
+ return await client.archived_bots.get_database_summary(db_path)
+ except Exception as e:
+ logger.error(f"Error fetching summary for {db_path}: {e}", exc_info=True)
+ return None
+
+
+async def fetch_database_performance(client, db_path: str) -> Optional[Dict[str, Any]]:
+ """Fetch performance metrics for a specific archived database."""
+ try:
+ return await client.archived_bots.get_database_performance(db_path)
+ except Exception as e:
+ logger.error(f"Error fetching performance for {db_path}: {e}", exc_info=True)
+ return None
+
+
+async def fetch_database_trades(client, db_path: str, limit: int = 500, offset: int = 0) -> Optional[Dict[str, Any]]:
+ """Fetch trades for a specific archived database (single page)."""
+ try:
+ return await client.archived_bots.get_database_trades(db_path, limit=limit, offset=offset)
+ except Exception as e:
+ logger.error(f"Error fetching trades for {db_path}: {e}", exc_info=True)
+ return None
+
+
+async def fetch_all_trades(client, db_path: str) -> List[Dict[str, Any]]:
+ """Fetch ALL trades with pagination."""
+ all_trades = []
+ offset = 0
+ limit = 500
+
+ while True:
+ response = await fetch_database_trades(client, db_path, limit=limit, offset=offset)
+ if not response:
+ break
+
+ trades = response.get("trades", [])
+ if not trades:
+ break
+
+ all_trades.extend(trades)
+
+ # Check pagination
+ pagination = response.get("pagination", {})
+ if not pagination.get("has_more", False):
+ break
+
+ offset += limit
+
+ # Safety limit to avoid infinite loops
+ if len(all_trades) > 50000:
+ logger.warning(f"Trade limit reached for {db_path}, stopping at {len(all_trades)}")
+ break
+
+ logger.debug(f"Fetched {len(all_trades)} total trades for {db_path}")
+ return all_trades
+
+
+async def fetch_database_orders(client, db_path: str, limit: int = 1000, offset: int = 0) -> Optional[Dict[str, Any]]:
+ """Fetch orders for a specific archived database."""
+ try:
+ return await client.archived_bots.get_database_orders(db_path, limit=limit, offset=offset)
+ except Exception as e:
+ logger.error(f"Error fetching orders for {db_path}: {e}", exc_info=True)
+ return None
+
+
+async def fetch_database_executors(client, db_path: str) -> Optional[Dict[str, Any]]:
+ """Fetch executors for a specific archived database."""
+ try:
+ return await client.archived_bots.get_database_executors(db_path)
+ except Exception as e:
+ logger.error(f"Error fetching executors for {db_path}: {e}", exc_info=True)
+ return None
+
+
+# ============================================
+# FORMATTING HELPERS
+# ============================================
+
+def _format_pnl(pnl: float) -> str:
+ """Format PnL with color indicator."""
+ if pnl >= 0:
+ return f"+${format_number(pnl)}"
+ else:
+ return f"-${format_number(abs(pnl))}"
+
+
+def _format_datetime(dt) -> str:
+ """Format datetime for display."""
+ if dt is None:
+ return "N/A"
+ if isinstance(dt, str):
+ # Parse ISO format and format nicely
+ try:
+ from datetime import datetime
+ if "T" in dt:
+ parsed = datetime.fromisoformat(dt.replace("Z", "+00:00"))
+ else:
+ parsed = datetime.fromisoformat(dt)
+ return parsed.strftime("%b %d %H:%M")
+ except Exception:
+ return dt[:16] if len(dt) > 16 else dt
+ return str(dt)
+
+
+def _format_duration(start_time, end_time) -> str:
+ """Calculate and format duration between two times."""
+ if not start_time or not end_time:
+ return "N/A"
+
+ try:
+ from datetime import datetime
+
+ def parse_dt(dt):
+ if isinstance(dt, datetime):
+ return dt
+ if isinstance(dt, str):
+ if "T" in dt:
+ return datetime.fromisoformat(dt.replace("Z", "+00:00"))
+ return datetime.fromisoformat(dt)
+ return None
+
+ start = parse_dt(start_time)
+ end = parse_dt(end_time)
+
+ if not start or not end:
+ return "N/A"
+
+ # Remove timezone info for calculation if present
+ if start.tzinfo:
+ start = start.replace(tzinfo=None)
+ if end.tzinfo:
+ end = end.replace(tzinfo=None)
+
+ delta = end - start
+ days = delta.days
+ hours = delta.seconds // 3600
+
+ if days > 0:
+ return f"{days}d {hours}h"
+ elif hours > 0:
+ mins = (delta.seconds % 3600) // 60
+ return f"{hours}h {mins}m"
+ else:
+ mins = delta.seconds // 60
+ return f"{mins}m"
+ except Exception as e:
+ logger.debug(f"Error calculating duration: {e}")
+ return "N/A"
+
+
+def _extract_bot_name(db_path: str) -> str:
+ """Extract readable bot name from database path."""
+ # db_path might be like "/path/to/bot_name.db" or just "bot_name.db"
+ import os
+ name = os.path.basename(db_path)
+ if name.endswith(".db"):
+ name = name[:-3]
+ return name
+
+
+# ============================================
+# MENU DISPLAY
+# ============================================
+
+async def show_archived_menu(update: Update, context: ContextTypes.DEFAULT_TYPE, page: int = 0) -> None:
+ """Display the archived bots menu with pagination."""
+ query = update.callback_query
+ chat_id = update.effective_chat.id
+
+ try:
+ client = await get_bots_client(chat_id)
+
+ # Fetch databases (with caching) - only healthy databases
+ cache_key = "archived_databases"
+ databases = get_cached(context.user_data, cache_key, ARCHIVED_CACHE_TTL)
+ if databases is None:
+ all_databases = await fetch_archived_databases(client)
+ # Filter to only healthy databases to avoid 500 errors
+ databases = await get_healthy_databases(client, all_databases)
+ set_cached(context.user_data, cache_key, databases)
+ logger.info(f"Found {len(databases)} healthy databases out of {len(all_databases)} total")
+ # Store total count for message
+ context.user_data["archived_total_count"] = len(all_databases)
+
+ # Store in context for later use
+ context.user_data["archived_databases"] = databases
+ context.user_data["archived_page"] = page
+
+ if not databases:
+ total_count = context.user_data.get("archived_total_count", 0)
+ if total_count > 0:
+ message = escape_markdown_v2(f"š No readable archived bots found.\n\n{total_count} databases exist but may be corrupted or incomplete.")
+ else:
+ message = escape_markdown_v2("š No archived bots found.\n\nArchived bots will appear here after you stop a running bot.")
+ keyboard = [[InlineKeyboardButton("š Back", callback_data="bots:main_menu")]]
+
+ if query:
+ await query.message.edit_text(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+ return
+
+ # Fetch summaries for current page (with caching)
+ summaries_cache_key = "archived_summaries"
+ all_summaries = get_cached(context.user_data, summaries_cache_key, ARCHIVED_CACHE_TTL)
+ if all_summaries is None:
+ all_summaries = {}
+ for db_path in databases:
+ summary = await fetch_database_summary(client, db_path)
+ if summary:
+ all_summaries[db_path] = summary
+ set_cached(context.user_data, summaries_cache_key, all_summaries)
+
+ context.user_data["archived_summaries"] = all_summaries
+
+ # Pagination
+ total_pages = (len(databases) + BOTS_PER_PAGE - 1) // BOTS_PER_PAGE
+ start_idx = page * BOTS_PER_PAGE
+ end_idx = min(start_idx + BOTS_PER_PAGE, len(databases))
+ page_databases = databases[start_idx:end_idx]
+
+ # Build message
+ lines = [r"*š Archived Bots*", ""]
+
+ for i, db_path in enumerate(page_databases, start=start_idx + 1):
+ summary = all_summaries.get(db_path, {})
+ bot_name = summary.get("bot_name") or _extract_bot_name(db_path)
+ total_trades = summary.get("total_trades", 0)
+ start_time = summary.get("start_time")
+ end_time = summary.get("end_time")
+
+ # Format time range
+ if start_time and end_time:
+ time_range = f"{_format_datetime(start_time)} ā {_format_datetime(end_time)}"
+ else:
+ time_range = "Time unknown"
+
+ lines.append(f"`{i}.` *{escape_markdown_v2(bot_name)}*")
+ lines.append(f" {escape_markdown_v2(time_range)} ⢠{total_trades} trades")
+ lines.append("")
+
+ message = "\n".join(lines)
+
+ # Build keyboard
+ keyboard = []
+
+ # Bot selection buttons - use index into databases list
+ for idx, db_path in enumerate(page_databases):
+ global_idx = start_idx + idx # Global index in full databases list
+ summary = all_summaries.get(db_path, {})
+ bot_name = summary.get("bot_name") or _extract_bot_name(db_path)
+ display_name = bot_name[:25] + "..." if len(bot_name) > 25 else bot_name
+ keyboard.append([
+ InlineKeyboardButton(f"š {display_name}", callback_data=f"bots:archived_select:{global_idx}")
+ ])
+
+ # Pagination row
+ if total_pages > 1:
+ nav_row = []
+ if page > 0:
+ nav_row.append(InlineKeyboardButton("āļø", callback_data=f"bots:archived_page:{page-1}"))
+ nav_row.append(InlineKeyboardButton(f"{page+1}/{total_pages}", callback_data="bots:noop"))
+ if page < total_pages - 1:
+ nav_row.append(InlineKeyboardButton("ā¶ļø", callback_data=f"bots:archived_page:{page+1}"))
+ keyboard.append(nav_row)
+
+ # Action buttons
+ keyboard.append([
+ InlineKeyboardButton("š Timeline", callback_data="bots:archived_timeline"),
+ InlineKeyboardButton("š Refresh", callback_data="bots:archived_refresh"),
+ ])
+ keyboard.append([
+ InlineKeyboardButton("š Back", callback_data="bots:main_menu"),
+ ])
+
+ reply_markup = InlineKeyboardMarkup(keyboard)
+
+ if query:
+ try:
+ await query.message.edit_text(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+ except BadRequest as e:
+ if "no text in the message" in str(e).lower():
+ await query.message.delete()
+ await context.bot.send_message(
+ chat_id=query.message.chat_id,
+ text=message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+ elif "Message is not modified" not in str(e):
+ raise
+ else:
+ await update.message.reply_text(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+
+ except Exception as e:
+ logger.error(f"Error showing archived menu: {e}", exc_info=True)
+ error_msg = format_error_message(f"Failed to fetch archived bots: {str(e)}")
+ keyboard = [[InlineKeyboardButton("š Back", callback_data="bots:main_menu")]]
+
+ if query:
+ await query.message.edit_text(
+ error_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+
+
+async def show_archived_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, db_index: int) -> None:
+ """Show detailed view for a specific archived bot."""
+ query = update.callback_query
+ chat_id = update.effective_chat.id
+
+ try:
+ db_path = _get_db_path_by_index(context, db_index)
+ if not db_path:
+ await query.message.reply_text("Bot not found. Please refresh the list.")
+ return
+
+ context.user_data["archived_current_db"] = db_path
+ context.user_data["archived_current_idx"] = db_index
+
+ client = await get_bots_client(chat_id)
+
+ # Fetch summary
+ summary = await fetch_database_summary(client, db_path)
+
+ if not summary:
+ error_msg = format_error_message("Could not fetch bot data")
+ keyboard = [[InlineKeyboardButton("š Back", callback_data="bots:archived")]]
+ await query.message.edit_text(
+ error_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+ return
+
+ # Extract data from summary (actual API structure)
+ bot_name = _extract_bot_name(db_path)
+ total_trades = summary.get("total_trades", 0)
+ total_orders = summary.get("total_orders", 0)
+ trading_pairs = summary.get("trading_pairs", [])
+ exchanges = summary.get("exchanges", [])
+
+ # Fetch trades to calculate PnL and time range
+ # Only fetch first page initially for quick display
+ trades_response = await fetch_database_trades(client, db_path, limit=500, offset=0)
+ trades = trades_response.get("trades", []) if trades_response else []
+
+ # Calculate PnL and time range from trades
+ from .archived_chart import calculate_pnl_from_trades, get_time_range_from_trades
+
+ # For detail view, use first page of trades for quick PnL estimate
+ pnl_data = calculate_pnl_from_trades(trades)
+ total_pnl = pnl_data.get("total_pnl", 0)
+ total_fees = pnl_data.get("total_fees", 0)
+ total_volume = pnl_data.get("total_volume", 0)
+
+ # Get time range from trades
+ start_time, end_time = get_time_range_from_trades(trades)
+
+ # Check if there are more trades (for accurate PnL)
+ pagination = trades_response.get("pagination", {}) if trades_response else {}
+ has_more = pagination.get("has_more", False)
+ pnl_note = " (partial)" if has_more else ""
+
+ # Build message
+ lines = [
+ f"*š {escape_markdown_v2(bot_name)}*",
+ "",
+ ]
+
+ # Time info
+ if start_time and end_time:
+ duration = _format_duration(start_time, end_time)
+ lines.append(f"ā± {escape_markdown_v2(_format_datetime(start_time))} ā {escape_markdown_v2(_format_datetime(end_time))}")
+ lines.append(f" Duration: {escape_markdown_v2(duration)}")
+ lines.append("")
+
+ # PnL - highlight color
+ pnl_emoji = "š" if total_pnl >= 0 else "š"
+ lines.append(f"{pnl_emoji} *PnL:* `{escape_markdown_v2(_format_pnl(total_pnl))}`{escape_markdown_v2(pnl_note)}")
+
+ # Other metrics
+ if total_volume:
+ lines.append(f"š *Volume:* ${escape_markdown_v2(format_number(total_volume))}")
+ if total_fees:
+ lines.append(f"š° *Fees:* ${escape_markdown_v2(format_number(total_fees))}")
+
+ lines.append("")
+ lines.append(f"š *Trades:* {total_trades} ⢠*Orders:* {total_orders}")
+
+ if trading_pairs:
+ lines.append(f"š *Pairs:* {escape_markdown_v2(', '.join(trading_pairs[:5]))}")
+ if exchanges:
+ lines.append(f"š¦ *Exchanges:* {escape_markdown_v2(', '.join(exchanges[:3]))}")
+
+ message = "\n".join(lines)
+
+ # Build keyboard - use index for callbacks
+ keyboard = [
+ [
+ InlineKeyboardButton("š Chart", callback_data=f"bots:archived_chart:{db_index}"),
+ InlineKeyboardButton("š¾ Report", callback_data=f"bots:archived_report:{db_index}"),
+ ],
+ [
+ InlineKeyboardButton("š Back", callback_data="bots:archived"),
+ ],
+ ]
+
+ await query.message.edit_text(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+
+ except Exception as e:
+ logger.error(f"Error showing archived detail: {e}", exc_info=True)
+ error_msg = format_error_message(f"Failed to fetch bot details: {str(e)}")
+ keyboard = [[InlineKeyboardButton("š Back", callback_data="bots:archived")]]
+ await query.message.edit_text(
+ error_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+
+
+# ============================================
+# CHART HANDLERS
+# ============================================
+
+async def show_timeline_chart(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Generate and show timeline chart for all archived bots."""
+ query = update.callback_query
+ chat_id = update.effective_chat.id
+
+ try:
+ # Show loading message
+ loading_msg = await query.message.reply_text(
+ escape_markdown_v2("ā³ Generating timeline... Fetching trade data for all bots."),
+ parse_mode="MarkdownV2"
+ )
+
+ # Get cached data or fetch
+ databases = context.user_data.get("archived_databases", [])
+ summaries = context.user_data.get("archived_summaries", {})
+
+ if not databases:
+ client = await get_bots_client(chat_id)
+ all_databases = await fetch_archived_databases(client)
+ # Filter to only healthy databases
+ databases = await get_healthy_databases(client, all_databases)
+ if not databases:
+ await loading_msg.edit_text(
+ escape_markdown_v2("No healthy archived bots to display."),
+ parse_mode="MarkdownV2"
+ )
+ return
+
+ client = await get_bots_client(chat_id)
+ bots_data = []
+
+ # Import chart functions
+ from .archived_chart import calculate_pnl_from_trades
+
+ for db_path in databases:
+ summary = summaries.get(db_path) or await fetch_database_summary(client, db_path)
+
+ if summary:
+ # Fetch all trades for this bot to calculate accurate PnL
+ trades = await fetch_all_trades(client, db_path)
+
+ # Calculate PnL from trades
+ pnl_data = calculate_pnl_from_trades(trades)
+
+ bots_data.append({
+ "db_path": db_path,
+ "summary": summary,
+ "trades": trades,
+ "pnl_data": pnl_data,
+ })
+
+ if not bots_data:
+ await loading_msg.edit_text(
+ escape_markdown_v2("No data available for timeline chart."),
+ parse_mode="MarkdownV2"
+ )
+ return
+
+ # Import chart generation
+ from .archived_chart import generate_timeline_chart
+
+ chart_bytes = generate_timeline_chart(bots_data)
+
+ if chart_bytes:
+ # Calculate total PnL from all bots
+ total_pnl = sum(b.get("pnl_data", {}).get("total_pnl", 0) for b in bots_data)
+
+ caption = (
+ f"š *Archived Bots Timeline*\n"
+ f"Total: {len(bots_data)} bots ⢠PnL: `{escape_markdown_v2(_format_pnl(total_pnl))}`"
+ )
+
+ keyboard = [[InlineKeyboardButton("š Back", callback_data="bots:archived")]]
+
+ # Delete loading message and send chart
+ await loading_msg.delete()
+
+ await context.bot.send_photo(
+ chat_id=chat_id,
+ photo=chart_bytes,
+ caption=caption,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+ else:
+ await loading_msg.edit_text(
+ escape_markdown_v2("Failed to generate timeline chart."),
+ parse_mode="MarkdownV2"
+ )
+
+ except Exception as e:
+ logger.error(f"Error generating timeline chart: {e}", exc_info=True)
+ error_msg = format_error_message(f"Failed to generate chart: {str(e)}")
+ await query.message.reply_text(error_msg, parse_mode="MarkdownV2")
+
+
+async def show_bot_chart(update: Update, context: ContextTypes.DEFAULT_TYPE, db_index: int) -> None:
+ """Generate and show performance chart for a specific bot."""
+ query = update.callback_query
+ chat_id = update.effective_chat.id
+
+ try:
+ db_path = _get_db_path_by_index(context, db_index)
+ if not db_path:
+ await query.message.reply_text("Bot not found. Please refresh the list.")
+ return
+
+ # Show loading message
+ loading_msg = await query.message.reply_text(
+ escape_markdown_v2("ā³ Generating chart... Fetching all trade data."),
+ parse_mode="MarkdownV2"
+ )
+
+ client = await get_bots_client(chat_id)
+
+ # Fetch summary and ALL trades
+ summary = await fetch_database_summary(client, db_path)
+ trades = await fetch_all_trades(client, db_path)
+
+ if not summary:
+ await loading_msg.edit_text(
+ escape_markdown_v2("Could not fetch bot data for chart."),
+ parse_mode="MarkdownV2"
+ )
+ return
+
+ # Import chart generation
+ from .archived_chart import generate_performance_chart, calculate_pnl_from_trades
+
+ # Calculate PnL from trades
+ pnl_data = calculate_pnl_from_trades(trades)
+ total_pnl = pnl_data.get("total_pnl", 0)
+
+ # Generate chart (pass db_path for bot name extraction)
+ chart_bytes = generate_performance_chart(summary, None, trades, db_path=db_path)
+
+ if chart_bytes:
+ bot_name = _extract_bot_name(db_path)
+
+ caption = (
+ f"š *{escape_markdown_v2(bot_name)}*\n"
+ f"PnL: `{escape_markdown_v2(_format_pnl(total_pnl))}` ⢠"
+ f"Trades: {len(trades)}"
+ )
+
+ keyboard = [[
+ InlineKeyboardButton("š¾ Report", callback_data=f"bots:archived_report:{db_index}"),
+ InlineKeyboardButton("š Back", callback_data=f"bots:archived_select:{db_index}"),
+ ]]
+
+ # Delete loading message and send chart
+ await loading_msg.delete()
+
+ await context.bot.send_photo(
+ chat_id=chat_id,
+ photo=chart_bytes,
+ caption=caption,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+ else:
+ await loading_msg.edit_text(
+ escape_markdown_v2("Failed to generate performance chart."),
+ parse_mode="MarkdownV2"
+ )
+
+ except Exception as e:
+ logger.error(f"Error generating bot chart: {e}", exc_info=True)
+ error_msg = format_error_message(f"Failed to generate chart: {str(e)}")
+ await query.message.reply_text(error_msg, parse_mode="MarkdownV2")
+
+
+# ============================================
+# REPORT GENERATION
+# ============================================
+
+async def handle_generate_report(update: Update, context: ContextTypes.DEFAULT_TYPE, db_index: int) -> None:
+ """Generate and save a full report for a specific bot."""
+ query = update.callback_query
+ chat_id = update.effective_chat.id
+
+ try:
+ db_path = _get_db_path_by_index(context, db_index)
+ if not db_path:
+ await query.message.reply_text("Bot not found. Please refresh the list.")
+ return
+
+ # Show progress message
+ progress_msg = await query.message.reply_text(
+ escape_markdown_v2("ā³ Generating report... This may take a moment."),
+ parse_mode="MarkdownV2"
+ )
+
+ # Import report generation
+ from .archived_report import save_full_report
+
+ client = await get_bots_client(chat_id)
+ json_path, png_path = await save_full_report(client, db_path)
+
+ # Update message with success
+ if json_path:
+ lines = [
+                "✅ *Report Generated\\!*",
+ "",
+ f"š JSON: `{escape_markdown_v2(json_path)}`",
+ ]
+ if png_path:
+ lines.append(f"š Chart: `{escape_markdown_v2(png_path)}`")
+
+ message = "\n".join(lines)
+
+ keyboard = [[InlineKeyboardButton("š Back", callback_data=f"bots:archived_select:{db_index}")]]
+
+ await progress_msg.edit_text(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+ else:
+ await progress_msg.edit_text(
+ escape_markdown_v2("ā Failed to generate report."),
+ parse_mode="MarkdownV2"
+ )
+
+ except Exception as e:
+ logger.error(f"Error generating report: {e}", exc_info=True)
+ error_msg = format_error_message(f"Failed to generate report: {str(e)}")
+ await query.message.reply_text(error_msg, parse_mode="MarkdownV2")
+
+
+# ============================================
+# REFRESH HANDLER
+# ============================================
+
+async def handle_archived_refresh(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Refresh archived bots data."""
+ # Clear cache
+ context.user_data.pop("_bots_cache", None)
+ clear_archived_state(context)
+
+ # Re-show menu
+ await show_archived_menu(update, context, page=0)
diff --git a/handlers/bots/archived_chart.py b/handlers/bots/archived_chart.py
new file mode 100644
index 0000000..aec5608
--- /dev/null
+++ b/handlers/bots/archived_chart.py
@@ -0,0 +1,695 @@
+"""
+Archived Bots Chart Generation
+
+Provides:
+- Timeline chart (Gantt-style) showing all bots with PnL-colored bars
+- Performance chart for individual bots with cumulative PnL
+- PnL calculation from trade data (OPEN/CLOSE positions)
+"""
+
+import io
+import logging
+import os
+from collections import defaultdict
+from typing import List, Dict, Any, Optional, Tuple
+from datetime import datetime
+
+logger = logging.getLogger(__name__)
+
+# Reuse the dark theme from visualizations
+DARK_THEME = {
+ "bgcolor": "#0a0e14",
+ "paper_bgcolor": "#0a0e14",
+ "plot_bgcolor": "#131720",
+ "font_color": "#e6edf3",
+ "font_family": "'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', sans-serif",
+ "grid_color": "#21262d",
+ "axis_color": "#8b949e",
+ "up_color": "#10b981", # Green for profit
+ "down_color": "#ef4444", # Red for loss
+ "neutral_color": "#6b7280", # Gray for zero
+}
+
+
+def _parse_timestamp(ts) -> Optional[datetime]:
+ """Parse timestamp from various formats including milliseconds."""
+ if ts is None:
+ return None
+
+ try:
+ # Handle millisecond timestamp (integer or float)
+ if isinstance(ts, (int, float)):
+ # If timestamp > 1e12, it's milliseconds
+ if ts > 1e12:
+ ts = ts / 1000
+ return datetime.fromtimestamp(ts)
+
+ if isinstance(ts, datetime):
+ return ts
+
+ if hasattr(ts, 'to_pydatetime'): # pandas Timestamp
+ return ts.to_pydatetime()
+
+ if isinstance(ts, str) and ts:
+ # Try parsing as ISO format
+ if "T" in ts:
+ return datetime.fromisoformat(ts.replace("Z", "+00:00"))
+ else:
+ return datetime.fromisoformat(ts)
+ except Exception as e:
+ logger.debug(f"Failed to parse timestamp {ts}: {e}")
+
+ return None
+
+
+def _format_pnl(pnl: float) -> str:
+ """Format PnL for display on chart."""
+ if pnl >= 0:
+ return f"+${pnl:,.2f}"
+ else:
+ return f"-${abs(pnl):,.2f}"
+
+
+def _get_pnl_color(pnl: float) -> str:
+ """Get color based on PnL value."""
+ if pnl > 0:
+ return DARK_THEME["up_color"]
+ elif pnl < 0:
+ return DARK_THEME["down_color"]
+ return DARK_THEME["neutral_color"]
+
+
+def _extract_bot_name(db_path: str) -> str:
+ """Extract readable bot name from database path."""
+ # db_path: "bots/archived/trend_follower_grid-20251015-155015/data/trend_follower_grid-20251015-155015.sqlite"
+ name = os.path.basename(db_path)
+ if name.endswith(".sqlite"):
+ name = name[:-7]
+ elif name.endswith(".db"):
+ name = name[:-3]
+ return name
+
+
+def calculate_pnl_from_trades(trades: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """
+ Calculate realized PnL from a list of trades using position tracking.
+
+ For perpetual futures:
+ - OPEN trades establish positions (long or short)
+ - CLOSE trades realize PnL
+
+ Args:
+ trades: List of trade dicts with timestamp, trading_pair, trade_type,
+ position, price, amount, trade_fee_in_quote
+
+ Returns:
+ Dict with:
+ - total_pnl: Total realized PnL
+ - total_fees: Total fees paid
+ - pnl_by_pair: Dict mapping trading_pair to PnL
+ - cumulative_pnl: List of (timestamp, pnl) for charting
+ - total_volume: Total traded volume in quote
+ """
+ if not trades:
+ return {
+ "total_pnl": 0,
+ "total_fees": 0,
+ "pnl_by_pair": {},
+ "cumulative_pnl": [],
+ "total_volume": 0,
+ }
+
+ # Track positions per trading pair
+ # position = {amount: float, total_cost: float, direction: int (1=long, -1=short)}
+ positions: Dict[str, Dict[str, Any]] = {}
+
+ pnl_by_pair: Dict[str, float] = defaultdict(float)
+ cumulative_pnl: List[Dict[str, Any]] = []
+ running_pnl = 0.0
+ total_fees = 0.0
+ total_volume = 0.0
+
+ # Debug counters
+ open_count = 0
+ close_count = 0
+ close_with_position = 0
+
+ # Sort trades by timestamp
+ sorted_trades = sorted(trades, key=lambda t: t.get("timestamp", 0))
+
+ for trade in sorted_trades:
+ pair = trade.get("trading_pair", "Unknown")
+ amount = float(trade.get("amount", 0))
+ price = float(trade.get("price", 0))
+ trade_type = trade.get("trade_type", "").upper() # BUY or SELL
+ position_action = trade.get("position", "").upper() # OPEN or CLOSE
+ fee = float(trade.get("trade_fee_in_quote", 0))
+ timestamp = trade.get("timestamp", 0)
+
+ total_fees += fee
+ total_volume += amount * price
+
+ # Parse timestamp for cumulative chart
+ ts = _parse_timestamp(timestamp)
+
+ if position_action == "OPEN":
+ open_count += 1
+ # Opening a new position or adding to existing
+ if pair not in positions:
+ positions[pair] = {"amount": 0, "total_cost": 0, "direction": 0}
+
+ pos = positions[pair]
+
+ if trade_type == "BUY":
+ # Opening/adding to long position
+ pos["amount"] += amount
+ pos["total_cost"] += price * amount
+ pos["direction"] = 1
+ else: # SELL
+ # Opening/adding to short position
+ pos["amount"] += amount
+ pos["total_cost"] += price * amount
+ pos["direction"] = -1
+
+ elif position_action == "CLOSE":
+ close_count += 1
+ # Closing a position - realize PnL
+ pos = positions.get(pair)
+
+ if pos and pos["amount"] > 0:
+ close_with_position += 1
+ # Calculate average entry price
+ avg_entry = pos["total_cost"] / pos["amount"]
+
+ if trade_type == "SELL":
+ # Closing long: PnL = (exit - entry) * amount
+ pnl = (price - avg_entry) * amount
+ else: # BUY
+ # Closing short: PnL = (entry - exit) * amount
+ pnl = (avg_entry - price) * amount
+
+ # Subtract fee from PnL
+ pnl -= fee
+
+ pnl_by_pair[pair] += pnl
+ running_pnl += pnl
+
+ # Update position
+ if amount >= pos["amount"]:
+ # Fully closed
+ del positions[pair]
+ else:
+ # Partially closed
+ close_ratio = amount / pos["amount"]
+ pos["amount"] -= amount
+ pos["total_cost"] -= pos["total_cost"] * close_ratio
+
+ # Record cumulative PnL point for charting
+ if ts:
+ cumulative_pnl.append({
+ "timestamp": ts,
+ "pnl": running_pnl,
+ "pair": pair,
+ })
+
+ logger.info(f"PnL calculation: {len(trades)} trades, {open_count} OPEN, {close_count} CLOSE, "
+ f"{close_with_position} CLOSE with matching position, total_pnl=${running_pnl:.4f}")
+
+ return {
+ "total_pnl": running_pnl,
+ "total_fees": total_fees,
+ "pnl_by_pair": dict(pnl_by_pair),
+ "cumulative_pnl": cumulative_pnl,
+ "total_volume": total_volume,
+ }
+
+
+def get_time_range_from_trades(trades: List[Dict[str, Any]]) -> Tuple[Optional[datetime], Optional[datetime]]:
+ """Extract start and end time from trades list."""
+ if not trades:
+ return None, None
+
+ timestamps = []
+ for trade in trades:
+ ts = _parse_timestamp(trade.get("timestamp"))
+ if ts:
+ timestamps.append(ts)
+
+ if not timestamps:
+ return None, None
+
+ return min(timestamps), max(timestamps)
+
+
+def generate_timeline_chart(
+ bots_data: List[Dict[str, Any]],
+ width: int = 1100,
+ height: int = 600,
+) -> Optional[io.BytesIO]:
+ """
+ Generate a Gantt-style timeline chart showing all archived bots.
+
+ Args:
+ bots_data: List of dicts with 'db_path', 'summary', 'trades', 'pnl_data' for each bot
+ width: Chart width in pixels
+ height: Chart height in pixels
+
+ Returns:
+ BytesIO buffer with PNG image or None if failed
+ """
+ try:
+ import plotly.graph_objects as go
+
+ if not bots_data:
+ return None
+
+ # Process bot data
+ processed = []
+ for bot in bots_data:
+ db_path = bot.get("db_path", "")
+ summary = bot.get("summary", {})
+ trades = bot.get("trades", [])
+ pnl_data = bot.get("pnl_data", {})
+
+ # Get bot name from summary or db_path
+ bot_name = summary.get("bot_name") or _extract_bot_name(db_path)
+
+ # Get time range from trades
+ start_time, end_time = get_time_range_from_trades(trades)
+
+ # Get PnL - prefer pre-calculated
+ total_pnl = pnl_data.get("total_pnl", 0) if pnl_data else 0
+
+ if start_time and end_time:
+ # Remove timezone for consistency
+ if start_time.tzinfo:
+ start_time = start_time.replace(tzinfo=None)
+ if end_time.tzinfo:
+ end_time = end_time.replace(tzinfo=None)
+
+ processed.append({
+ "name": bot_name,
+ "start": start_time,
+ "end": end_time,
+ "pnl": total_pnl,
+ "color": _get_pnl_color(total_pnl),
+ "trades": summary.get("total_trades", len(trades)),
+ })
+
+ if not processed:
+ logger.warning("No bots with valid time data for timeline")
+ return None
+
+ # Sort by start time
+ processed.sort(key=lambda x: x["start"])
+
+ # Create figure
+ fig = go.Figure()
+
+ # Add bars for each bot
+ for i, bot in enumerate(processed):
+ # Calculate duration in hours for bar width
+ duration = (bot["end"] - bot["start"]).total_seconds() / 3600
+
+ # Create the bar using a horizontal bar chart approach
+ fig.add_trace(go.Bar(
+ y=[bot["name"]],
+ x=[duration],
+ base=[bot["start"]],
+ orientation='h',
+ marker_color=bot["color"],
+ marker_line_width=0,
+ text=f'{_format_pnl(bot["pnl"])}',
+ textposition="inside",
+ textfont=dict(color="white", size=11),
+ hovertemplate=(
+ f"{bot['name']}<br>"
+ f"Start: {bot['start'].strftime('%b %d %H:%M')}<br>"
+ f"End: {bot['end'].strftime('%b %d %H:%M')}<br>"
+ f"PnL: {_format_pnl(bot['pnl'])}<br>"
+ f"Trades: {bot['trades']}"
+ ),
+ showlegend=False,
+ ))
+
+ # Calculate totals for subtitle
+ total_pnl = sum(b["pnl"] for b in processed)
+ total_trades = sum(b["trades"] for b in processed)
+
+ # Update layout
+ fig.update_layout(
+ title=dict(
+ text=f"Archived Bots Timeline<br>{len(processed)} bots | Total PnL: {_format_pnl(total_pnl)} | {total_trades} trades",
+ x=0.5,
+ font=dict(size=16, color=DARK_THEME["font_color"]),
+ ),
+ paper_bgcolor=DARK_THEME["paper_bgcolor"],
+ plot_bgcolor=DARK_THEME["plot_bgcolor"],
+ font=dict(
+ family=DARK_THEME["font_family"],
+ color=DARK_THEME["font_color"],
+ ),
+ xaxis=dict(
+ type="date",
+ showgrid=True,
+ gridcolor=DARK_THEME["grid_color"],
+ tickformat="%b %d",
+ tickfont=dict(color=DARK_THEME["axis_color"]),
+ ),
+ yaxis=dict(
+ showgrid=False,
+ tickfont=dict(color=DARK_THEME["axis_color"]),
+ autorange="reversed", # First bot at top
+ ),
+ barmode="overlay",
+ bargap=0.3,
+ margin=dict(l=150, r=30, t=80, b=50),
+ height=max(height, 100 + len(processed) * 40), # Dynamic height based on number of bots
+ width=width,
+ )
+
+ # Export to PNG
+ img_bytes = io.BytesIO()
+ fig.write_image(img_bytes, format='png', scale=2)
+ img_bytes.seek(0)
+
+ return img_bytes
+
+ except ImportError as e:
+ logger.error(f"Missing required package for chart generation: {e}")
+ return None
+ except Exception as e:
+ logger.error(f"Error generating timeline chart: {e}", exc_info=True)
+ return None
+
+
+def generate_performance_chart(
+ summary: Dict[str, Any],
+ performance: Optional[Dict[str, Any]],
+ trades: List[Dict[str, Any]],
+ db_path: str = "",
+ width: int = 1100,
+ height: int = 600,
+) -> Optional[io.BytesIO]:
+ """
+ Generate a performance chart for a single bot showing cumulative PnL.
+
+ Args:
+ summary: BotSummary data
+ performance: BotPerformanceResponse data (often None or incomplete)
+ trades: List of TradeDetail objects
+ db_path: Database path for extracting bot name
+ width: Chart width in pixels
+ height: Chart height in pixels
+
+ Returns:
+ BytesIO buffer with PNG image or None if failed
+ """
+ try:
+ import plotly.graph_objects as go
+ from plotly.subplots import make_subplots
+
+ bot_name = summary.get("bot_name") or _extract_bot_name(db_path)
+
+ # Calculate PnL from trades
+ pnl_data = calculate_pnl_from_trades(trades)
+ cumulative_pnl = pnl_data.get("cumulative_pnl", [])
+ pnl_by_pair = pnl_data.get("pnl_by_pair", {})
+ total_pnl = pnl_data.get("total_pnl", 0)
+ total_fees = pnl_data.get("total_fees", 0)
+ total_volume = pnl_data.get("total_volume", 0)
+
+ # Create figure with subplots
+ fig = make_subplots(
+ rows=2, cols=2,
+ specs=[
+ [{"colspan": 2}, None],
+ [{"type": "bar"}, {"type": "pie"}],
+ ],
+ subplot_titles=("Cumulative PnL Over Time", "PnL by Trading Pair", "Trade Distribution"),
+ vertical_spacing=0.15,
+ horizontal_spacing=0.1,
+ row_heights=[0.6, 0.4],
+ )
+
+ # Panel 1: Cumulative PnL line chart
+ if cumulative_pnl:
+ timestamps = [p["timestamp"] for p in cumulative_pnl]
+ pnl_values = [p["pnl"] for p in cumulative_pnl]
+
+ line_color = _get_pnl_color(total_pnl)
+
+ fig.add_trace(
+ go.Scatter(
+ x=timestamps,
+ y=pnl_values,
+ mode='lines',
+ name='Net PnL',
+ line=dict(color=line_color, width=2),
+ fill='tozeroy',
+ fillcolor=f'rgba{tuple(list(int(line_color.lstrip("#")[i:i+2], 16) for i in (0, 2, 4)) + [0.15])}',
+ hovertemplate="%{x|%b %d %H:%M}<br>PnL: $%{y:,.4f}",
+ ),
+ row=1, col=1
+ )
+
+ # Add zero line
+ fig.add_hline(y=0, line_dash="dash", line_color=DARK_THEME["axis_color"], opacity=0.5, row=1, col=1)
+
+ # Panel 2: PnL by trading pair (bar chart)
+ if pnl_by_pair:
+ # Sort by absolute PnL
+ sorted_pairs = sorted(pnl_by_pair.items(), key=lambda x: abs(x[1]), reverse=True)[:8]
+ pairs = [p[0] for p in sorted_pairs]
+ pnls = [p[1] for p in sorted_pairs]
+ colors = [_get_pnl_color(p) for p in pnls]
+
+ fig.add_trace(
+ go.Bar(
+ x=pairs,
+ y=pnls,
+ marker_color=colors,
+ showlegend=False,
+ hovertemplate="%{x}<br>PnL: $%{y:,.2f}",
+ ),
+ row=2, col=1
+ )
+
+ # Panel 3: Trade type distribution (pie chart)
+ buy_count = sum(1 for t in trades if t.get("trade_type", "").upper() == "BUY")
+ sell_count = len(trades) - buy_count
+
+ if trades:
+ fig.add_trace(
+ go.Pie(
+ labels=["Buy", "Sell"],
+ values=[buy_count, sell_count],
+ marker_colors=[DARK_THEME["up_color"], DARK_THEME["down_color"]],
+ hole=0.4,
+ textinfo="label+percent",
+ textfont=dict(color="white"),
+ showlegend=False,
+ ),
+ row=2, col=2
+ )
+
+ # Get time range
+ start_time, end_time = get_time_range_from_trades(trades)
+ time_info = ""
+ if start_time and end_time:
+ time_info = f" | {start_time.strftime('%b %d')} - {end_time.strftime('%b %d %H:%M')}"
+
+ # Update layout
+ fig.update_layout(
+ title=dict(
+ text=(
+ f"{bot_name}<br>"
+ f"PnL: {_format_pnl(total_pnl)} | "
+ f"Volume: ${total_volume:,.0f} | "
+ f"Fees: ${total_fees:,.2f} | "
+ f"Trades: {len(trades)}{time_info}"
+ ),
+ x=0.5,
+ font=dict(size=16, color=DARK_THEME["font_color"]),
+ ),
+ paper_bgcolor=DARK_THEME["paper_bgcolor"],
+ plot_bgcolor=DARK_THEME["plot_bgcolor"],
+ font=dict(family=DARK_THEME["font_family"], color=DARK_THEME["font_color"]),
+ margin=dict(l=70, r=30, t=80, b=50),
+ height=height,
+ width=width,
+ )
+
+ # Update axes styling
+ fig.update_xaxes(showgrid=True, gridcolor=DARK_THEME["grid_color"], tickfont=dict(color=DARK_THEME["axis_color"]))
+ fig.update_yaxes(showgrid=True, gridcolor=DARK_THEME["grid_color"], tickfont=dict(color=DARK_THEME["axis_color"]))
+
+ # Export to PNG
+ img_bytes = io.BytesIO()
+ fig.write_image(img_bytes, format='png', scale=2)
+ img_bytes.seek(0)
+
+ return img_bytes
+
+ except ImportError as e:
+ logger.error(f"Missing required package for chart generation: {e}")
+ return None
+ except Exception as e:
+ logger.error(f"Error generating performance chart: {e}", exc_info=True)
+ return None
+
+
+def generate_report_chart(
+ summary: Dict[str, Any],
+ performance: Optional[Dict[str, Any]],
+ trades: List[Dict[str, Any]],
+ executors: List[Dict[str, Any]],
+ db_path: str = "",
+ width: int = 1400,
+ height: int = 800,
+) -> Optional[io.BytesIO]:
+ """
+ Generate a comprehensive report chart with multiple panels.
+
+ Args:
+ summary: BotSummary data
+ performance: BotPerformanceResponse data
+ trades: List of TradeDetail objects
+ executors: List of ExecutorInfo objects
+ db_path: Database path for extracting bot name
+ width: Chart width in pixels
+ height: Chart height in pixels
+
+ Returns:
+ BytesIO buffer with PNG image or None if failed
+ """
+ try:
+ import plotly.graph_objects as go
+ from plotly.subplots import make_subplots
+
+ bot_name = summary.get("bot_name") or _extract_bot_name(db_path)
+
+ # Calculate PnL from trades
+ pnl_data = calculate_pnl_from_trades(trades)
+ cumulative_pnl = pnl_data.get("cumulative_pnl", [])
+ pnl_by_pair = pnl_data.get("pnl_by_pair", {})
+ total_pnl = pnl_data.get("total_pnl", 0)
+ total_fees = pnl_data.get("total_fees", 0)
+ total_volume = pnl_data.get("total_volume", 0)
+
+ # Create 2x2 subplot layout
+ fig = make_subplots(
+ rows=2, cols=2,
+ specs=[
+ [{"type": "scatter"}, {"type": "bar"}],
+ [{"type": "bar", "colspan": 2}, None],
+ ],
+ subplot_titles=(
+ "Cumulative PnL",
+ "PnL by Trading Pair",
+ "Volume by Market",
+ ),
+ vertical_spacing=0.15,
+ horizontal_spacing=0.1,
+ )
+
+ # Panel 1: Cumulative PnL
+ if cumulative_pnl:
+ timestamps = [p["timestamp"] for p in cumulative_pnl]
+ pnl_values = [p["pnl"] for p in cumulative_pnl]
+
+ line_color = _get_pnl_color(total_pnl)
+
+ fig.add_trace(
+ go.Scatter(
+ x=timestamps,
+ y=pnl_values,
+ mode='lines',
+ line=dict(color=line_color, width=2),
+ fill='tozeroy',
+ fillcolor=f'rgba{tuple(list(int(line_color.lstrip("#")[i:i+2], 16) for i in (0, 2, 4)) + [0.15])}',
+ showlegend=False,
+ ),
+ row=1, col=1
+ )
+
+ # Panel 2: PnL by trading pair
+ if pnl_by_pair:
+ sorted_pairs = sorted(pnl_by_pair.items(), key=lambda x: abs(x[1]), reverse=True)[:8]
+ pairs = [p[0] for p in sorted_pairs]
+ pnls = [p[1] for p in sorted_pairs]
+ colors = [_get_pnl_color(p) for p in pnls]
+
+ fig.add_trace(
+ go.Bar(
+ x=pairs,
+ y=pnls,
+ marker_color=colors,
+ showlegend=False,
+ ),
+ row=1, col=2
+ )
+
+ # Panel 3: Volume by market (bar chart)
+ market_volume: Dict[str, float] = {}
+ for trade in trades:
+ pair = trade.get("trading_pair", "Unknown")
+ amount = float(trade.get("amount", 0))
+ price = float(trade.get("price", 0))
+ volume = amount * price
+ market_volume[pair] = market_volume.get(pair, 0) + volume
+
+ if market_volume:
+ sorted_markets = sorted(market_volume.items(), key=lambda x: x[1], reverse=True)[:10]
+ markets = [m[0] for m in sorted_markets]
+ volumes = [m[1] for m in sorted_markets]
+
+ fig.add_trace(
+ go.Bar(
+ x=markets,
+ y=volumes,
+ marker_color=DARK_THEME["up_color"],
+ showlegend=False,
+ ),
+ row=2, col=1
+ )
+
+ # Update layout
+ fig.update_layout(
+ title=dict(
+ text=(
+ f"{bot_name} Report<br>"
+ f"PnL: {_format_pnl(total_pnl)} | Volume: ${total_volume:,.0f} | "
+ f"Fees: ${total_fees:,.2f} | Trades: {len(trades)}"
+ ),
+ x=0.5,
+ font=dict(size=18, color=DARK_THEME["font_color"]),
+ ),
+ paper_bgcolor=DARK_THEME["paper_bgcolor"],
+ plot_bgcolor=DARK_THEME["plot_bgcolor"],
+ font=dict(
+ family=DARK_THEME["font_family"],
+ color=DARK_THEME["font_color"],
+ ),
+ margin=dict(l=60, r=30, t=100, b=50),
+ height=height,
+ width=width,
+ )
+
+ # Update subplot axes
+ fig.update_xaxes(showgrid=True, gridcolor=DARK_THEME["grid_color"])
+ fig.update_yaxes(showgrid=True, gridcolor=DARK_THEME["grid_color"])
+
+ # Export to PNG
+ img_bytes = io.BytesIO()
+ fig.write_image(img_bytes, format='png', scale=2)
+ img_bytes.seek(0)
+
+ return img_bytes
+
+ except ImportError as e:
+ logger.error(f"Missing required package for chart generation: {e}")
+ return None
+ except Exception as e:
+ logger.error(f"Error generating report chart: {e}", exc_info=True)
+ return None
diff --git a/handlers/bots/archived_report.py b/handlers/bots/archived_report.py
new file mode 100644
index 0000000..283aca3
--- /dev/null
+++ b/handlers/bots/archived_report.py
@@ -0,0 +1,359 @@
+"""
+Archived Bots Report Generation
+
+Saves comprehensive reports to local filesystem:
+- JSON file with all bot data
+- PNG chart file with performance visualization
+"""
+
+import json
+import logging
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, Any, Optional, Tuple, List
+
+logger = logging.getLogger(__name__)
+
+# Reports directory in project root
+REPORTS_DIR = Path("reports")
+
+
+def ensure_reports_dir() -> Path:
+ """Create reports directory if it doesn't exist."""
+ REPORTS_DIR.mkdir(exist_ok=True)
+ return REPORTS_DIR
+
+
+def _extract_bot_name(db_path: str) -> str:
+ """Extract readable bot name from database path."""
+ name = os.path.basename(db_path)
+ if name.endswith(".sqlite"):
+ name = name[:-7]
+ elif name.endswith(".db"):
+ name = name[:-3]
+ return name
+
+
+def generate_report_filename(db_path: str) -> str:
+ """
+ Generate a unique filename for the report.
+
+ Format: {bot_name}_{YYYYMMDD_HHMMSS}
+ """
+ # Extract bot name from db_path
+ bot_name = _extract_bot_name(db_path)
+ # Sanitize bot name for filename
+ safe_name = "".join(c if c.isalnum() or c in "-_" else "_" for c in bot_name)
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ return f"{safe_name}_{timestamp}"
+
+
+def _serialize_datetime(obj):
+ """JSON serializer for datetime objects."""
+ if isinstance(obj, datetime):
+ return obj.isoformat()
+ raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
+
+
+def build_report_json(
+ db_path: str,
+ summary: Dict[str, Any],
+ performance: Optional[Dict[str, Any]],
+ trades: List[Dict[str, Any]],
+ orders: List[Dict[str, Any]],
+ executors: List[Dict[str, Any]],
+ pnl_data: Optional[Dict[str, Any]] = None,
+) -> Dict[str, Any]:
+ """
+ Build the complete JSON report structure.
+
+ Args:
+ db_path: Path to the database file
+ summary: BotSummary data
+ performance: BotPerformanceResponse data
+ trades: List of TradeDetail objects
+ orders: List of OrderDetail objects
+ executors: List of ExecutorInfo objects
+ pnl_data: Calculated PnL data from trades
+
+ Returns:
+ Complete report dictionary
+ """
+ # Calculate time range from trades
+ start_time = None
+ end_time = None
+ if trades:
+ timestamps = [t.get("timestamp") for t in trades if t.get("timestamp")]
+ if timestamps:
+ # Convert milliseconds to ISO format
+ min_ts = min(timestamps)
+ max_ts = max(timestamps)
+ if min_ts > 1e12:
+ min_ts = min_ts / 1000
+ if max_ts > 1e12:
+ max_ts = max_ts / 1000
+ start_time = datetime.fromtimestamp(min_ts).isoformat()
+ end_time = datetime.fromtimestamp(max_ts).isoformat()
+
+ return {
+ "metadata": {
+ "generated_at": datetime.now().isoformat(),
+ "db_path": db_path,
+ "bot_name": _extract_bot_name(db_path),
+ "generator": "condor",
+ "version": "1.1.0",
+ },
+ "summary": summary,
+ "calculated_pnl": pnl_data or {},
+ "period": {
+ "start": start_time,
+ "end": end_time,
+ },
+ "trades": trades,
+ "orders": orders,
+ "executors": executors,
+ "statistics": {
+ "total_trades": len(trades),
+ "total_orders": len(orders),
+ "total_executors": len(executors),
+ },
+ }
+
+
+async def save_full_report(
+ client,
+ db_path: str,
+ include_chart: bool = True
+) -> Tuple[Optional[str], Optional[str]]:
+ """
+ Fetch all data and save comprehensive report.
+
+ Args:
+ client: API client with archived_bots router
+ db_path: Path to the database file
+ include_chart: Whether to generate and save a PNG chart
+
+ Returns:
+ Tuple of (json_path, png_path) or (json_path, None) if no chart
+ """
+ try:
+ # Ensure reports directory exists
+ ensure_reports_dir()
+
+ # Fetch all data
+ logger.info(f"Fetching data for report: {db_path}")
+
+ summary = await client.archived_bots.get_database_summary(db_path)
+ if not summary:
+ logger.error(f"Could not fetch summary for {db_path}")
+ return None, None
+
+ # Performance may fail for some databases - continue anyway
+ try:
+ performance = await client.archived_bots.get_database_performance(db_path)
+ except Exception as e:
+ logger.warning(f"Could not fetch performance for {db_path}: {e}")
+ performance = None
+
+ # Fetch trades with pagination (get all)
+ all_trades = []
+ offset = 0
+ limit = 500
+ while True:
+ trades_response = await client.archived_bots.get_database_trades(
+ db_path, limit=limit, offset=offset
+ )
+ if not trades_response:
+ break
+ trades = trades_response.get("trades", [])
+ if not trades:
+ break
+ all_trades.extend(trades)
+ if len(trades) < limit:
+ break
+ offset += limit
+ logger.info(f"Fetched {len(all_trades)} trades")
+
+ # Fetch orders with pagination
+ all_orders = []
+ offset = 0
+ while True:
+ orders_response = await client.archived_bots.get_database_orders(
+ db_path, limit=limit, offset=offset
+ )
+ if not orders_response:
+ break
+ orders = orders_response.get("orders", [])
+ if not orders:
+ break
+ all_orders.extend(orders)
+ if len(orders) < limit:
+ break
+ offset += limit
+ logger.info(f"Fetched {len(all_orders)} orders")
+
+ # Fetch executors
+ executors_response = await client.archived_bots.get_database_executors(db_path)
+ executors = executors_response.get("executors", []) if executors_response else []
+ logger.info(f"Fetched {len(executors)} executors")
+
+ # Calculate PnL from trades
+ from .archived_chart import calculate_pnl_from_trades
+ pnl_data = calculate_pnl_from_trades(all_trades)
+ logger.info(f"Calculated PnL: ${pnl_data.get('total_pnl', 0):.2f}")
+
+ # Build report
+ filename = generate_report_filename(db_path)
+
+ report_data = build_report_json(
+ db_path=db_path,
+ summary=summary,
+ performance=performance,
+ trades=all_trades,
+ orders=all_orders,
+ executors=executors,
+ pnl_data=pnl_data,
+ )
+
+ # Save JSON
+ json_path = REPORTS_DIR / f"{filename}.json"
+ with open(json_path, "w", encoding="utf-8") as f:
+ json.dump(report_data, f, indent=2, default=_serialize_datetime)
+ logger.info(f"Saved JSON report to {json_path}")
+
+ # Generate and save chart
+ png_path = None
+ if include_chart:
+ try:
+ from .archived_chart import generate_report_chart
+
+ chart_bytes = generate_report_chart(
+ summary=summary,
+ performance=performance,
+ trades=all_trades,
+ executors=executors,
+ db_path=db_path,
+ )
+
+ if chart_bytes:
+ png_path = REPORTS_DIR / f"{filename}.png"
+ with open(png_path, "wb") as f:
+ f.write(chart_bytes.read())
+ logger.info(f"Saved chart to {png_path}")
+ png_path = str(png_path)
+ except Exception as e:
+ logger.error(f"Error generating chart for report: {e}", exc_info=True)
+
+ return str(json_path), png_path
+
+ except Exception as e:
+ logger.error(f"Error saving report: {e}", exc_info=True)
+ return None, None
+
+
+def list_reports() -> List[Dict[str, Any]]:
+ """
+ List all saved reports in the reports directory.
+
+ Returns:
+ List of report metadata dicts
+ """
+ reports = []
+ try:
+ if not REPORTS_DIR.exists():
+ return []
+
+ for json_file in REPORTS_DIR.glob("*.json"):
+ try:
+ with open(json_file, "r", encoding="utf-8") as f:
+ data = json.load(f)
+
+ metadata = data.get("metadata", {})
+ summary = data.get("summary", {})
+
+ # Check for corresponding PNG
+ png_file = json_file.with_suffix(".png")
+ has_chart = png_file.exists()
+
+ reports.append({
+ "filename": json_file.name,
+ "path": str(json_file),
+ "chart_path": str(png_file) if has_chart else None,
+ "generated_at": metadata.get("generated_at"),
+ "bot_name": summary.get("bot_name"),
+ "db_path": metadata.get("db_path"),
+ })
+ except Exception as e:
+ logger.debug(f"Error reading report {json_file}: {e}")
+ continue
+
+ # Sort by generation time, newest first
+ reports.sort(key=lambda r: r.get("generated_at", ""), reverse=True)
+
+ except Exception as e:
+ logger.error(f"Error listing reports: {e}", exc_info=True)
+
+ return reports
+
+
+def load_report(filename: str) -> Optional[Dict[str, Any]]:
+ """
+ Load a saved report by filename.
+
+ Args:
+ filename: Report filename (with or without .json extension)
+
+ Returns:
+ Report data dict or None if not found
+ """
+ try:
+ if not filename.endswith(".json"):
+ filename = f"{filename}.json"
+
+ report_path = REPORTS_DIR / filename
+
+ if not report_path.exists():
+ return None
+
+ with open(report_path, "r", encoding="utf-8") as f:
+ return json.load(f)
+
+ except Exception as e:
+ logger.error(f"Error loading report {filename}: {e}", exc_info=True)
+ return None
+
+
+def delete_report(filename: str) -> bool:
+ """
+ Delete a saved report and its chart.
+
+ Args:
+ filename: Report filename (with or without .json extension)
+
+ Returns:
+ True if deleted successfully
+ """
+ try:
+ if not filename.endswith(".json"):
+ filename = f"{filename}.json"
+
+ json_path = REPORTS_DIR / filename
+ png_path = json_path.with_suffix(".png")
+
+ deleted = False
+
+ if json_path.exists():
+ json_path.unlink()
+ deleted = True
+ logger.info(f"Deleted report {json_path}")
+
+ if png_path.exists():
+ png_path.unlink()
+ logger.info(f"Deleted chart {png_path}")
+
+ return deleted
+
+ except Exception as e:
+ logger.error(f"Error deleting report {filename}: {e}", exc_info=True)
+ return False
diff --git a/handlers/bots/controllers/grid_strike/config.py b/handlers/bots/controllers/grid_strike/config.py
index 23a67b1..d3a30f3 100644
--- a/handlers/bots/controllers/grid_strike/config.py
+++ b/handlers/bots/controllers/grid_strike/config.py
@@ -51,6 +51,7 @@
"take_profit": 0.0005,
"take_profit_order_type": 3,
},
+ "coerce_tp_to_step": False,
}
@@ -199,6 +200,14 @@
hint="Order type for take profit",
default=ORDER_TYPE_LIMIT_MAKER
),
+ "coerce_tp_to_step": ControllerField(
+ name="coerce_tp_to_step",
+ label="Coerce TP to Step",
+ type="bool",
+ required=False,
+ hint="Set TP to step size if TP is smaller than step",
+ default=False
+ ),
}
@@ -208,7 +217,8 @@
"total_amount_quote", "start_price", "end_price", "limit_price",
"max_open_orders", "max_orders_per_batch", "order_frequency",
"min_order_amount_quote", "min_spread_between_orders", "take_profit",
- "open_order_type", "take_profit_order_type", "keep_position", "activation_bounds"
+ "open_order_type", "take_profit_order_type", "coerce_tp_to_step",
+ "keep_position", "activation_bounds"
]
diff --git a/handlers/bots/menu.py b/handlers/bots/menu.py
index 081f5d7..6efcd0a 100644
--- a/handlers/bots/menu.py
+++ b/handlers/bots/menu.py
@@ -51,9 +51,10 @@ def _build_main_menu_keyboard(bots_dict: Dict[str, Any]) -> InlineKeyboardMarkup
InlineKeyboardButton("ā PMM Mister", callback_data="bots:new_pmm_mister"),
])
- # Action buttons - configs
+ # Action buttons - configs and historical
keyboard.append([
InlineKeyboardButton("š Configs", callback_data="bots:controller_configs"),
+ InlineKeyboardButton("š Historical", callback_data="bots:archived"),
])
keyboard.append([
diff --git a/handlers/config/__init__.py b/handlers/config/__init__.py
index 671aa35..0c304e2 100644
--- a/handlers/config/__init__.py
+++ b/handlers/config/__init__.py
@@ -185,6 +185,12 @@ async def handle_all_text_input(update: Update, context: ContextTypes.DEFAULT_TY
await handle_gateway_input(update, context)
return
+ # 7. Check routines state
+ if context.user_data.get('routines_state'):
+ from handlers.routines import routines_message_handler
+ await routines_message_handler(update, context)
+ return
+
# No active state - ignore the message
logger.debug(f"No active input state for message: {update.message.text[:50] if update.message else 'N/A'}...")
diff --git a/handlers/dex/__init__.py b/handlers/dex/__init__.py
index 4f2951e..bc1a3cc 100644
--- a/handlers/dex/__init__.py
+++ b/handlers/dex/__init__.py
@@ -131,6 +131,8 @@
handle_gecko_token_add,
handle_back_to_list,
handle_gecko_add_liquidity,
+ handle_gecko_swap,
+ show_gecko_info,
)
# Unified liquidity module
from .liquidity import (
@@ -226,7 +228,7 @@ async def dex_callback_handler(update: Update, context: ContextTypes.DEFAULT_TYP
"pool_info", "pool_list", "manage_positions", "pos_add_confirm", "pos_close_exec",
"add_to_gateway", "pool_detail_refresh",
"gecko_networks", "gecko_trades", "gecko_show_pools", "gecko_refresh", "gecko_token_search", "gecko_token_add",
- "gecko_explore"}
+ "gecko_explore", "gecko_swap", "gecko_info"}
# Also show typing for actions that start with these prefixes
slow_prefixes = ("gecko_trending_", "gecko_top_", "gecko_new_", "gecko_pool:", "gecko_ohlcv:",
"gecko_token:", "swap_hist_set_", "lp_hist_set_")
@@ -358,6 +360,10 @@ async def dex_callback_handler(update: Update, context: ContextTypes.DEFAULT_TYP
await handle_pool_list_back(update, context)
elif action == "pool_detail_refresh":
await handle_pool_detail_refresh(update, context)
+ elif action.startswith("pool_tf:"):
+ # Format: pool_tf:timeframe
+ timeframe = action.split(":")[1]
+ await handle_pool_detail_refresh(update, context, timeframe=timeframe)
elif action == "add_to_gateway":
await handle_add_to_gateway(update, context)
elif action.startswith("plot_liquidity:"):
@@ -370,6 +376,12 @@ async def dex_callback_handler(update: Update, context: ContextTypes.DEFAULT_TYP
elif action.startswith("pos_view:"):
pos_index = action.split(":")[1]
await handle_pos_view(update, context, pos_index)
+ elif action.startswith("pos_view_tf:"):
+ # Format: pos_view_tf:pos_index:timeframe
+ parts = action.split(":")
+ pos_index = parts[1]
+ timeframe = parts[2] if len(parts) > 2 else "1h"
+ await handle_pos_view(update, context, pos_index, timeframe=timeframe)
elif action.startswith("pos_view_pool:"):
pos_index = action.split(":")[1]
await handle_pos_view_pool(update, context, pos_index)
@@ -440,6 +452,10 @@ async def dex_callback_handler(update: Update, context: ContextTypes.DEFAULT_TYP
await handle_pos_toggle_strategy(update, context)
elif action == "pos_refresh":
await handle_pos_refresh(update, context)
+ elif action.startswith("pos_tf:"):
+ # Format: pos_tf:timeframe - switch timeframe in add position menu
+ timeframe = action.split(":")[1]
+ await handle_pos_refresh(update, context, timeframe=timeframe)
# GeckoTerminal explore handlers
elif action == "gecko_explore":
@@ -498,6 +514,10 @@ async def dex_callback_handler(update: Update, context: ContextTypes.DEFAULT_TYP
await handle_gecko_token_search(update, context)
elif action == "gecko_token_add":
await handle_gecko_token_add(update, context)
+ elif action == "gecko_swap":
+ await handle_gecko_swap(update, context)
+ elif action == "gecko_info":
+ await show_gecko_info(update, context)
elif action.startswith("gecko_ohlcv:"):
timeframe = action.split(":")[1]
await show_ohlcv_chart(update, context, timeframe)
diff --git a/handlers/dex/geckoterminal.py b/handlers/dex/geckoterminal.py
index b54020e..966d9f1 100644
--- a/handlers/dex/geckoterminal.py
+++ b/handlers/dex/geckoterminal.py
@@ -1246,12 +1246,37 @@ async def process_gecko_search(update: Update, context: ContextTypes.DEFAULT_TYP
)
+# GeckoTerminal to Gateway network mapping
+GECKO_TO_GATEWAY_NETWORK = {
+ "solana": "solana-mainnet-beta",
+ "eth": "ethereum-mainnet",
+ "base": "base-mainnet",
+ "arbitrum": "arbitrum-one",
+ "bsc": "bsc-mainnet",
+ "polygon_pos": "polygon-mainnet",
+ "avalanche": "avalanche-mainnet",
+ "optimism": "optimism-mainnet",
+}
+
+# Default connectors by network chain
+NETWORK_DEFAULT_CONNECTOR = {
+ "solana": "jupiter",
+ "eth": "uniswap",
+ "base": "uniswap",
+ "arbitrum": "uniswap",
+ "bsc": "pancakeswap",
+ "polygon_pos": "quickswap",
+ "avalanche": "traderjoe",
+ "optimism": "uniswap",
+}
+
+
# ============================================
# POOL DETAIL VIEW
# ============================================
async def show_pool_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, pool_index: int) -> None:
- """Show detailed information for a selected pool"""
+ """Show OHLCV chart automatically when selecting a pool with action buttons"""
query = update.callback_query
pools = context.user_data.get("gecko_pools", [])
@@ -1266,7 +1291,175 @@ async def show_pool_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, p
context.user_data["gecko_selected_pool"] = pool_data
context.user_data["gecko_selected_pool_index"] = pool_index
- # Build detailed view
+ # Default timeframe for initial view
+ default_timeframe = "1h" # 1h candles showing 1 day of data
+
+ # Show OHLCV chart automatically
+ await _show_pool_chart(update, context, pool_data, default_timeframe)
+
+
+async def _show_pool_chart(update: Update, context: ContextTypes.DEFAULT_TYPE, pool_data: dict, timeframe: str) -> None:
+ """Internal function to show pool OHLCV chart with action buttons"""
+ query = update.callback_query
+
+ await query.answer("Loading chart...")
+
+ # Show loading - handle photo messages (can't edit photo to text)
+ if getattr(query.message, 'photo', None):
+ await query.message.delete()
+ loading_msg = await query.message.chat.send_message(
+ f"š *{escape_markdown_v2(pool_data['name'])}*\n\n_Loading chart\\.\\.\\._",
+ parse_mode="MarkdownV2"
+ )
+ else:
+ await query.message.edit_text(
+ f"š *{escape_markdown_v2(pool_data['name'])}*\n\n_Loading chart\\.\\.\\._",
+ parse_mode="MarkdownV2"
+ )
+ loading_msg = query.message
+
+ try:
+ network = pool_data["network"]
+ address = pool_data["address"]
+
+ client = GeckoTerminalAsyncClient()
+ result = await client.get_ohlcv(network, address, timeframe)
+
+ logger.info(f"OHLCV raw response type: {type(result)}")
+
+ # Handle various response formats for OHLCV
+ ohlcv_data = []
+
+ # Check for pandas DataFrame first (library often returns DataFrames)
+ try:
+ import pandas as pd
+ if isinstance(result, pd.DataFrame):
+ logger.info(f"OHLCV DataFrame columns: {list(result.columns)}, shape: {result.shape}")
+ if not result.empty:
+ if result.index.name == 'datetime' or 'datetime' not in result.columns:
+ result = result.reset_index()
+ ohlcv_data = result.values.tolist()
+ logger.info(f"Converted OHLCV DataFrame with {len(ohlcv_data)} rows")
+ except ImportError:
+ pass
+
+ # If not a DataFrame, try other formats
+ if not ohlcv_data:
+ if isinstance(result, dict):
+ ohlcv_data = result.get("data", {}).get("attributes", {}).get("ohlcv_list", [])
+ elif hasattr(result, 'data'):
+ data = result.data
+ if isinstance(data, dict):
+ ohlcv_data = data.get("attributes", {}).get("ohlcv_list", [])
+ elif hasattr(data, 'attributes'):
+ ohlcv_data = getattr(data.attributes, 'ohlcv_list', [])
+ elif isinstance(result, list):
+ ohlcv_data = result
+
+ if not ohlcv_data:
+ # Fall back to text view if no chart data
+ await _show_pool_text_detail(loading_msg, context, pool_data)
+ return
+
+ # Generate chart image using visualization module
+ pair_name = pool_data.get('name', 'Pool')
+ base_symbol = pool_data.get('base_token_symbol')
+ quote_symbol = pool_data.get('quote_token_symbol')
+
+ chart_buffer = generate_ohlcv_chart(
+ ohlcv_data=ohlcv_data,
+ pair_name=pair_name,
+ timeframe=_format_timeframe_label(timeframe),
+ base_symbol=base_symbol,
+ quote_symbol=quote_symbol
+ )
+
+ if not chart_buffer:
+ await _show_pool_text_detail(loading_msg, context, pool_data)
+ return
+
+ # Build caption with key info
+ caption_lines = [
+ f"š *{escape_markdown_v2(pool_data['name'])}*",
+ ]
+
+ # Add price and change info
+ if pool_data.get("base_token_price_usd"):
+ try:
+ price = float(pool_data["base_token_price_usd"])
+ caption_lines.append(f"š° {escape_markdown_v2(_format_price(price))}")
+ except (ValueError, TypeError):
+ pass
+
+ change_24h = pool_data.get("price_change_24h")
+ if change_24h is not None:
+ try:
+ change = float(change_24h)
+ caption_lines.append(f"{escape_markdown_v2(_format_change(change))} 24h")
+ except (ValueError, TypeError):
+ pass
+
+ vol_24h = pool_data.get("volume_24h")
+ if vol_24h:
+ try:
+ vol = float(vol_24h)
+ caption_lines.append(f"Vol: {escape_markdown_v2(_format_volume(vol))}")
+ except (ValueError, TypeError):
+ pass
+
+ caption = caption_lines[0] + "\n" + " \\| ".join(caption_lines[1:]) if len(caption_lines) > 1 else caption_lines[0]
+
+ # Build keyboard with timeframe selection and action buttons
+ pool_index = context.user_data.get("gecko_selected_pool_index", 0)
+ dex_id = pool_data.get("dex_id", "")
+ network = pool_data.get("network", "")
+ supports_liquidity = can_fetch_liquidity(dex_id, network)
+
+ keyboard = [
+ # Timeframe row
+ [
+ InlineKeyboardButton("1h" if timeframe != "1m" else "• 1h •", callback_data="dex:gecko_ohlcv:1m"),
+ InlineKeyboardButton("1d" if timeframe != "1h" else "• 1d •", callback_data="dex:gecko_ohlcv:1h"),
+ InlineKeyboardButton("7d" if timeframe != "1d" else "• 7d •", callback_data="dex:gecko_ohlcv:1d"),
+ ],
+ # Action row: Swap + Trades + Info
+ [
+ InlineKeyboardButton("š± Swap", callback_data="dex:gecko_swap"),
+ InlineKeyboardButton("š Trades", callback_data="dex:gecko_trades"),
+ InlineKeyboardButton("ā¹ļø Info", callback_data="dex:gecko_info"),
+ ],
+ ]
+
+ # Add liquidity button for supported DEXes
+ if supports_liquidity:
+ keyboard.append([
+ InlineKeyboardButton("š Liquidity", callback_data="dex:gecko_liquidity"),
+ InlineKeyboardButton("ā Add LP", callback_data="dex:gecko_add_liquidity"),
+ ])
+
+ keyboard.append([
+ InlineKeyboardButton("š", callback_data=f"dex:gecko_pool:{pool_index}"),
+ InlineKeyboardButton("« Back", callback_data="dex:gecko_back_to_list"),
+ ])
+
+ # Delete loading message and send photo
+ await loading_msg.delete()
+ await loading_msg.chat.send_photo(
+ photo=chart_buffer,
+ caption=caption,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard)
+ )
+
+ except Exception as e:
+ logger.error(f"Error generating pool chart: {e}", exc_info=True)
+ # Fall back to text view on error
+ await _show_pool_text_detail(loading_msg, context, pool_data)
+
+
+async def _show_pool_text_detail(message, context: ContextTypes.DEFAULT_TYPE, pool_data: dict) -> None:
+ """Show pool details as text (fallback when chart unavailable)"""
+ pool_index = context.user_data.get("gecko_selected_pool_index", 0)
network_name = NETWORK_NAMES.get(pool_data["network"], pool_data["network"])
lines = [
@@ -1340,16 +1533,6 @@ async def show_pool_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, p
except (ValueError, TypeError):
pass
- # Transactions
- txns = pool_data.get("transactions_24h", {})
- if txns:
- buys = txns.get("buys", 0)
- sells = txns.get("sells", 0)
- if buys or sells:
- lines.append("")
- lines.append(r"š *24h Transactions:*")
- lines.append(f"⢠Buys: {buys} | Sells: {sells}")
-
# Pool address and link
lines.append("")
addr = pool_data.get("address", "")
@@ -1360,35 +1543,18 @@ async def show_pool_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, p
gecko_url = f"https://www.geckoterminal.com/{network}/pools/{addr}"
lines.append(f"\nš¦ [View on GeckoTerminal]({escape_markdown_v2(gecko_url)})")
- # Build keyboard - compressed menu like Meteora
+ # Build keyboard
dex_id = pool_data.get("dex_id", "")
- network = pool_data.get("network", "")
supports_liquidity = can_fetch_liquidity(dex_id, network)
- keyboard = []
-
- # Row 1: Charts button + Trades
- keyboard.append([
- InlineKeyboardButton("š Charts", callback_data="dex:gecko_charts"),
- InlineKeyboardButton("š Trades", callback_data="dex:gecko_trades"),
- ])
-
- # Row 2: Token info buttons if addresses are available
- base_addr = pool_data.get("base_token_address", "")
- quote_addr = pool_data.get("quote_token_address", "")
- base_sym = pool_data.get("base_token_symbol", "Base")[:6]
- quote_sym = pool_data.get("quote_token_symbol", "Quote")[:6]
-
- if base_addr or quote_addr:
- token_row = []
- if base_addr:
- token_row.append(InlineKeyboardButton(f"šŖ {base_sym}", callback_data="dex:gecko_token:base"))
- if quote_addr:
- token_row.append(InlineKeyboardButton(f"šŖ {quote_sym}", callback_data="dex:gecko_token:quote"))
- if token_row:
- keyboard.append(token_row)
+ keyboard = [
+ [
+ InlineKeyboardButton("š Charts", callback_data="dex:gecko_charts"),
+ InlineKeyboardButton("š± Swap", callback_data="dex:gecko_swap"),
+ InlineKeyboardButton("š Trades", callback_data="dex:gecko_trades"),
+ ],
+ ]
- # Row 3: Add Liquidity button - only for supported DEXes (Meteora, Raydium, Orca on Solana)
if supports_liquidity:
keyboard.append([
InlineKeyboardButton("ā Add Liquidity", callback_data="dex:gecko_add_liquidity"),
@@ -1399,22 +1565,12 @@ async def show_pool_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, p
InlineKeyboardButton("« Back", callback_data="dex:gecko_back_to_list"),
])
- # Handle case when returning from photo (OHLCV chart) - can't edit photo to text
- if getattr(query.message, 'photo', None):
- await query.message.delete()
- await query.message.chat.send_message(
- "\n".join(lines),
- parse_mode="MarkdownV2",
- reply_markup=InlineKeyboardMarkup(keyboard),
- disable_web_page_preview=True
- )
- else:
- await query.message.edit_text(
- "\n".join(lines),
- parse_mode="MarkdownV2",
- reply_markup=InlineKeyboardMarkup(keyboard),
- disable_web_page_preview=True
- )
+ await message.edit_text(
+ "\n".join(lines),
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard),
+ disable_web_page_preview=True
+ )
# ============================================
@@ -2572,6 +2728,230 @@ async def handle_gecko_add_liquidity(update: Update, context: ContextTypes.DEFAU
await _show_pool_detail(update, context, pool, from_callback=True)
+# ============================================
+# SWAP INTEGRATION
+# ============================================
+
+async def handle_gecko_swap(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Set up swap params from GeckoTerminal pool and redirect to swap menu"""
+ from .swap import show_swap_menu
+
+ query = update.callback_query
+
+ pool_data = context.user_data.get("gecko_selected_pool")
+ if not pool_data:
+ await query.answer("No pool selected")
+ return
+
+ await query.answer("Opening swap...")
+
+ # Get trading pair from pool
+ base_symbol = pool_data.get("base_token_symbol", "")
+ quote_symbol = pool_data.get("quote_token_symbol", "")
+
+ if not base_symbol or not quote_symbol:
+ await query.answer("Token symbols not available")
+ return
+
+ # Format trading pair as BASE-QUOTE
+ trading_pair = f"{base_symbol}-{quote_symbol}"
+
+ # Map GeckoTerminal network to Gateway network
+ gecko_network = pool_data.get("network", "solana")
+ gateway_network = GECKO_TO_GATEWAY_NETWORK.get(gecko_network, "solana-mainnet-beta")
+
+ # Get default connector for this network
+ connector = NETWORK_DEFAULT_CONNECTOR.get(gecko_network, "jupiter")
+
+ # Set up swap params
+ context.user_data["swap_params"] = {
+ "connector": connector,
+ "network": gateway_network,
+ "trading_pair": trading_pair,
+ "side": "BUY",
+ "amount": "1.0",
+ "slippage": "1.0",
+ }
+
+ context.user_data["dex_state"] = "swap"
+
+ # Store source for back navigation
+ context.user_data["swap_from_gecko"] = True
+ context.user_data["swap_gecko_pool_index"] = context.user_data.get("gecko_selected_pool_index", 0)
+
+ # Delete current message (might be a photo) and show swap menu
+ try:
+ if getattr(query.message, 'photo', None):
+ await query.message.delete()
+ else:
+ await query.message.delete()
+ except Exception:
+ pass
+
+ # Show swap menu
+ await show_swap_menu(update, context)
+
+
+async def show_gecko_info(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Show detailed pool info as text (accessed via Info button)"""
+ query = update.callback_query
+
+ pool_data = context.user_data.get("gecko_selected_pool")
+ if not pool_data:
+ await query.answer("No pool selected")
+ return
+
+ await query.answer()
+
+ pool_index = context.user_data.get("gecko_selected_pool_index", 0)
+ network_name = NETWORK_NAMES.get(pool_data["network"], pool_data["network"])
+
+ lines = [
+ f"š *{escape_markdown_v2(pool_data['name'])}*\n",
+ f"š Network: {escape_markdown_v2(network_name)}",
+ f"š¦ DEX: {escape_markdown_v2(pool_data['dex_id'])}",
+ "",
+ r"š° *Price Info:*",
+ ]
+
+ if pool_data.get("base_token_price_usd"):
+ try:
+ price = float(pool_data["base_token_price_usd"])
+ symbol = escape_markdown_v2(pool_data['base_token_symbol'])
+ lines.append(f"⢠{symbol}: {escape_markdown_v2(_format_price(price))}")
+ except (ValueError, TypeError):
+ pass
+
+ if pool_data.get("quote_token_price_usd"):
+ try:
+ price = float(pool_data["quote_token_price_usd"])
+ symbol = escape_markdown_v2(pool_data['quote_token_symbol'])
+ lines.append(f"⢠{symbol}: {escape_markdown_v2(_format_price(price))}")
+ except (ValueError, TypeError):
+ pass
+
+ lines.append("")
+ lines.append(r"š *Price Changes:*")
+
+ for period, key in [("1h", "price_change_1h"), ("6h", "price_change_6h"), ("24h", "price_change_24h")]:
+ change = pool_data.get(key)
+ if change is not None:
+ try:
+ change = float(change)
+ lines.append(f"⢠{period}: {escape_markdown_v2(_format_change(change))}")
+ except (ValueError, TypeError):
+ pass
+
+ lines.append("")
+ lines.append(r"š *Volume:*")
+
+ for period, key in [("1h", "volume_1h"), ("6h", "volume_6h"), ("24h", "volume_24h")]:
+ vol = pool_data.get(key)
+ if vol:
+ try:
+ vol = float(vol)
+ lines.append(f"⢠{period}: {escape_markdown_v2(_format_volume(vol))}")
+ except (ValueError, TypeError):
+ pass
+
+ # Market cap and FDV
+ lines.append("")
+ if pool_data.get("market_cap_usd"):
+ try:
+ mc = float(pool_data["market_cap_usd"])
+ lines.append(f"š Market Cap: {escape_markdown_v2(_format_volume(mc))}")
+ except (ValueError, TypeError):
+ pass
+
+ if pool_data.get("fdv_usd"):
+ try:
+ fdv = float(pool_data["fdv_usd"])
+ lines.append(f"š FDV: {escape_markdown_v2(_format_volume(fdv))}")
+ except (ValueError, TypeError):
+ pass
+
+ if pool_data.get("reserve_usd"):
+ try:
+ reserve = float(pool_data["reserve_usd"])
+ lines.append(f"š§ Liquidity: {escape_markdown_v2(_format_volume(reserve))}")
+ except (ValueError, TypeError):
+ pass
+
+ # Transactions
+ txns = pool_data.get("transactions_24h", {})
+ if txns:
+ buys = txns.get("buys", 0)
+ sells = txns.get("sells", 0)
+ if buys or sells:
+ lines.append("")
+ lines.append(r"š *24h Transactions:*")
+ lines.append(f"⢠Buys: {buys} | Sells: {sells}")
+
+ # Pool address and link
+ lines.append("")
+ addr = pool_data.get("address", "")
+ network = pool_data.get("network", "")
+ if addr:
+ lines.append(f"š Address:\n`{addr}`")
+ if network:
+ gecko_url = f"https://www.geckoterminal.com/{network}/pools/{addr}"
+ lines.append(f"\nš¦ [View on GeckoTerminal]({escape_markdown_v2(gecko_url)})")
+
+ # Token info buttons
+ base_addr = pool_data.get("base_token_address", "")
+ quote_addr = pool_data.get("quote_token_address", "")
+ base_sym = pool_data.get("base_token_symbol", "Base")[:6]
+ quote_sym = pool_data.get("quote_token_symbol", "Quote")[:6]
+
+ # Build keyboard
+ dex_id = pool_data.get("dex_id", "")
+ supports_liquidity = can_fetch_liquidity(dex_id, network)
+
+ keyboard = []
+
+ # Token info buttons if addresses are available
+ if base_addr or quote_addr:
+ token_row = []
+ if base_addr:
+ token_row.append(InlineKeyboardButton(f"šŖ {base_sym}", callback_data="dex:gecko_token:base"))
+ if quote_addr:
+ token_row.append(InlineKeyboardButton(f"šŖ {quote_sym}", callback_data="dex:gecko_token:quote"))
+ if token_row:
+ keyboard.append(token_row)
+
+ keyboard.append([
+ InlineKeyboardButton("š Chart", callback_data=f"dex:gecko_pool:{pool_index}"),
+ InlineKeyboardButton("š± Swap", callback_data="dex:gecko_swap"),
+ InlineKeyboardButton("š Trades", callback_data="dex:gecko_trades"),
+ ])
+
+ if supports_liquidity:
+ keyboard.append([
+ InlineKeyboardButton("ā Add Liquidity", callback_data="dex:gecko_add_liquidity"),
+ ])
+
+ keyboard.append([
+ InlineKeyboardButton("« Back", callback_data="dex:gecko_back_to_list"),
+ ])
+
+ # Handle case when returning from photo (OHLCV chart) - can't edit photo to text
+ if getattr(query.message, 'photo', None):
+ await query.message.delete()
+ await query.message.chat.send_message(
+ "\n".join(lines),
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard),
+ disable_web_page_preview=True
+ )
+ else:
+ await query.message.edit_text(
+ "\n".join(lines),
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard),
+ disable_web_page_preview=True
+ )
+
+
# ============================================
# EXPORTS
# ============================================
@@ -2607,4 +2987,6 @@ async def handle_gecko_add_liquidity(update: Update, context: ContextTypes.DEFAU
'handle_gecko_token_add',
'handle_back_to_list',
'handle_gecko_add_liquidity',
+ 'handle_gecko_swap',
+ 'show_gecko_info',
]
diff --git a/handlers/dex/liquidity.py b/handlers/dex/liquidity.py
index ac000b6..13c9d1a 100644
--- a/handlers/dex/liquidity.py
+++ b/handlers/dex/liquidity.py
@@ -440,11 +440,9 @@ def _format_closed_position_line(pos: dict, token_cache: dict = None, token_pric
closed_at = pos.get('closed_at', pos.get('updated_at', ''))
age = format_relative_time(closed_at) if closed_at else ""
- # Build line: "MET-USDC (met) → [0.31-0.32]"
- line = f"{pair} ({connector}) → {range_str}"
+ # Build single-line format: "MET-USDC (met) → [0.31-0.32] | PnL: -$38.92 | 💰 $5.42 | 17h"
+ parts = [f"{pair} ({connector}) → {range_str}"]
- # Add PnL and fees on second line in USD
- parts = []
if pnl_usd >= 0:
parts.append(f"PnL: +${pnl_usd:.2f}")
else:
@@ -453,9 +451,8 @@ def _format_closed_position_line(pos: dict, token_cache: dict = None, token_pric
parts.append(f"š° ${fees_usd:.2f}")
if age:
parts.append(age)
- line += "\n " + " | ".join(parts)
- return line
+ return " | ".join(parts)
# ============================================
diff --git a/handlers/dex/pool_data.py b/handlers/dex/pool_data.py
index 8693634..af5b35d 100644
--- a/handlers/dex/pool_data.py
+++ b/handlers/dex/pool_data.py
@@ -144,7 +144,18 @@ async def fetch_ohlcv(
return cached, None
client = GeckoTerminalAsyncClient()
- result = await client.get_ohlcv(gecko_network, pool_address, timeframe, currency=currency)
+ # Pass all parameters explicitly:
+ # - currency="token" means price in quote token (not USD)
+ # - token="base" means OHLCV for the base token
+ # - limit=100 for reasonable data size
+ result = await client.get_ohlcv(
+ gecko_network,
+ pool_address,
+ timeframe,
+ currency=currency,
+ token="base",
+ limit=100
+ )
# Parse response - handle different formats
ohlcv_list = None
@@ -173,6 +184,16 @@ async def fetch_ohlcv(
if not ohlcv_list:
return None, "No OHLCV data available"
+ # Debug logging: show price range from OHLCV data
+ if ohlcv_list:
+ try:
+ closes = [float(c[4]) for c in ohlcv_list if len(c) > 4 and c[4]]
+ if closes:
+ logger.info(f"OHLCV {pool_address[:8]}... {timeframe} currency={currency}: "
+ f"{len(ohlcv_list)} candles, price range [{min(closes):.6f} - {max(closes):.6f}]")
+ except Exception as e:
+ logger.debug(f"Could not log OHLCV price range: {e}")
+
# Cache result
if user_data is not None:
set_cached(user_data, cache_key, ohlcv_list)
diff --git a/handlers/dex/pools.py b/handlers/dex/pools.py
index 5778c05..c77d019 100644
--- a/handlers/dex/pools.py
+++ b/handlers/dex/pools.py
@@ -414,13 +414,13 @@ def _format_pool_table(pools: list) -> str:
# Header - balanced for mobile (~40 chars)
lines.append("```")
- lines.append(f"{'#':>2} {'Pair':<12} {'APR%':>7} {'Bin':>3} {'Fee':>4} {'TVL':>5}")
+ lines.append(f"{'#':>2} {'Pair':<11} {'APR%':>7} {'Bin':>3} {'Fee':>5} {'TVL':>5}")
lines.append("─" * 40)
for i, pool in enumerate(pools):
idx = str(i + 1)
- # Truncate pair to 12 chars
- pair = pool.get('trading_pair', 'N/A')[:12]
+ # Truncate pair to 11 chars
+ pair = pool.get('trading_pair', 'N/A')[:11]
# Get TVL value
tvl_val = 0
@@ -437,7 +437,7 @@ def _format_pool_table(pools: list) -> str:
if base_fee:
try:
fee_val = float(base_fee)
- fee_str = f"{fee_val:.1f}" if fee_val < 10 else f"{int(fee_val)}"
+ fee_str = f"{fee_val:.2f}" if fee_val < 10 else f"{int(fee_val)}"
except (ValueError, TypeError):
fee_str = "ā"
else:
@@ -457,7 +457,7 @@ def _format_pool_table(pools: list) -> str:
elif apr_val >= 100:
apr_str = f"{apr_val:.0f}"
else:
- apr_str = f"{apr_val:.1f}"
+ apr_str = f"{apr_val:.2f}"
except (ValueError, TypeError):
apr_str = "ā"
else:
@@ -466,7 +466,7 @@ def _format_pool_table(pools: list) -> str:
# Bin step
bin_step = pool.get('bin_step', 'ā')
- lines.append(f"{idx:>2} {pair:<12} {apr_str:>7} {bin_step:>3} {fee_str:>4} {tvl:>5}")
+ lines.append(f"{idx:>2} {pair:<11} {apr_str:>7} {bin_step:>3} {fee_str:>5} {tvl:>5}")
lines.append("```")
@@ -956,7 +956,8 @@ async def _show_pool_detail(
context: ContextTypes.DEFAULT_TYPE,
pool: dict,
from_callback: bool = False,
- has_list_context: bool = True
+ has_list_context: bool = True,
+ timeframe: str = "1h"
) -> None:
"""Show detailed pool information with inline add liquidity controls
@@ -966,27 +967,63 @@ async def _show_pool_detail(
pool: Pool data dict
from_callback: Whether triggered from callback (button click)
has_list_context: Whether there's a pool list to go back to
+ timeframe: OHLCV timeframe (1m, 1h, 1d) - default 1h
"""
+ import asyncio
from io import BytesIO
from utils.telegram_formatters import resolve_token_symbol
pool_address = pool.get('pool_address', pool.get('address', 'N/A'))
connector = pool.get('connector', 'meteora')
network = 'solana-mainnet-beta'
-
- # Fetch additional pool info with bins (cached with 60s TTL)
chat_id = update.effective_chat.id
+
+ # Parallel fetch: pool_info, token_cache, and OHLCV data
cache_key = f"pool_info_{connector}_{pool_address}"
pool_info = get_cached(context.user_data, cache_key, ttl=DEFAULT_CACHE_TTL)
- if pool_info is None:
+ token_cache = context.user_data.get("token_cache")
+ gecko_network = get_gecko_network(network)
+
+ # Build list of async tasks to run in parallel
+ async def fetch_pool_info_task():
+ if pool_info is not None:
+ return pool_info
client = await get_client(chat_id)
- pool_info = await _fetch_pool_info(client, pool_address, connector)
- set_cached(context.user_data, cache_key, pool_info)
+ return await _fetch_pool_info(client, pool_address, connector)
- # Get or fetch token cache for symbol resolution
- token_cache = context.user_data.get("token_cache")
- if not token_cache:
- token_cache = await get_token_cache_from_gateway(chat_id=chat_id)
+ async def fetch_token_cache_task():
+ if token_cache is not None:
+ return token_cache
+ return await get_token_cache_from_gateway(chat_id=chat_id)
+
+ async def fetch_ohlcv_task():
+ try:
+ data, error = await fetch_ohlcv(
+ pool_address=pool_address,
+ network=gecko_network,
+ timeframe=timeframe,
+ currency="token",
+ user_data=context.user_data
+ )
+ if error:
+ logger.warning(f"Failed to fetch OHLCV: {error}")
+ return []
+ return data or []
+ except Exception as e:
+ logger.warning(f"Error fetching OHLCV: {e}")
+ return []
+
+ # Run all fetches in parallel
+ pool_info, token_cache, ohlcv_data = await asyncio.gather(
+ fetch_pool_info_task(),
+ fetch_token_cache_task(),
+ fetch_ohlcv_task()
+ )
+
+ # Cache results
+ if pool_info:
+ set_cached(context.user_data, cache_key, pool_info)
+ if token_cache:
context.user_data["token_cache"] = token_cache
# Try to get trading pair name from multiple sources
@@ -1094,60 +1131,46 @@ async def _show_pool_detail(
quote_in_gateway = mint_y and mint_y in token_cache
tokens_in_gateway = base_in_gateway and quote_in_gateway
- # Build message with copyable addresses
- message = r"š *Pool Details*" + "\n\n"
- message += escape_markdown_v2(f"š Pool: {pair}") + "\n\n"
-
- # Addresses section - all copyable
- message += escape_markdown_v2("─── Addresses ───") + "\n"
- message += escape_markdown_v2("š Pool: ") + f"`{pool_address}`\n"
- if mint_x:
- message += escape_markdown_v2(f"šŖ Base ({base_symbol}): ") + f"`{mint_x}`\n"
- if mint_y:
- message += escape_markdown_v2(f"šµ Quote ({quote_symbol}): ") + f"`{mint_y}`\n"
- message += "\n"
-
- # Pool metrics
+ # Get pool metrics
tvl = pool.get('liquidity') or pool.get('tvl') or pool_info.get('liquidity') or pool_info.get('tvl')
vol_24h = pool.get('volume_24h') or pool_info.get('volume_24h')
fees_24h = pool.get('fees_24h') or pool_info.get('fees_24h')
+ base_fee = pool.get('base_fee_percentage') or pool_info.get('base_fee_percentage')
+ apr = pool.get('apr') or pool_info.get('apr')
+
+ # Build message - matching show_add_position_menu format
+ message = r"ā *Add CLMM Position*" + "\n\n"
- if tvl or vol_24h or fees_24h:
- lines = ["─── Metrics ───"]
+ # Pool header - compact info
+ message += f"š *Pool:* `{escape_markdown_v2(pair)}`\n"
+ addr_short = f"{pool_address[:6]}...{pool_address[-4:]}" if len(pool_address) > 12 else pool_address
+ message += f"š *Address:* `{escape_markdown_v2(addr_short)}`\n"
+ if current_price:
+ message += f"š± *Price:* `{escape_markdown_v2(str(current_price)[:10])}`\n"
+ if bin_step:
+ message += f"š *Bin Step:* `{escape_markdown_v2(str(bin_step))}` _\\(default 20 bins each side\\)_\n"
+ if base_fee:
+ message += f"šø *Fee:* `{escape_markdown_v2(str(base_fee))}%`\n"
+
+ # Pool metrics section (if available)
+ if tvl or vol_24h or fees_24h or apr:
+ lines = []
if tvl:
lines.append(f"š° TVL: ${_format_number(tvl)}")
if vol_24h:
- lines.append(f"š Volume 24h: ${_format_number(vol_24h)}")
+ lines.append(f"š Vol 24h: ${_format_number(vol_24h)}")
if fees_24h:
lines.append(f"šµ Fees 24h: ${_format_number(fees_24h)}")
- message += escape_markdown_v2("\n".join(lines)) + "\n\n"
-
- # Fees and APR
- base_fee = pool.get('base_fee_percentage') or pool_info.get('base_fee_percentage')
- apr = pool.get('apr') or pool_info.get('apr')
-
- if base_fee or apr:
- lines = ["─── Fees & Yield ───"]
- if base_fee:
- lines.append(f"šø Fee: {base_fee}%")
if apr:
try:
apr_val = float(apr)
- lines.append(f"š APR: {apr_val:.2f}%")
+ lines.append(f"š APR: {apr_val:.2f}%")
except (ValueError, TypeError):
pass
- message += escape_markdown_v2("\n".join(lines)) + "\n\n"
+ if lines:
+ message += "\n" + escape_markdown_v2(" | ".join(lines)) + "\n"
- # Pool config - compact
- if bin_step or current_price:
- lines = ["─── Config ───"]
- if bin_step:
- lines.append(f"š Bin Step: {bin_step}")
- if current_price:
- lines.append(f"š± Price: {current_price}")
- message += escape_markdown_v2("\n".join(lines)) + "\n\n"
-
- # Wallet balances for add liquidity
+ # Fetch wallet balances
try:
balance_cache_key = f"token_balances_{network}_{base_symbol}_{quote_symbol}"
balances = get_cached(context.user_data, balance_cache_key, ttl=DEFAULT_CACHE_TTL)
@@ -1155,14 +1178,6 @@ async def _show_pool_detail(
client = await get_client(chat_id)
balances = await _fetch_token_balances(client, network, base_symbol, quote_symbol)
set_cached(context.user_data, balance_cache_key, balances)
-
- lines = ["─── Wallet ───"]
- base_bal_str = _format_number(balances["base_balance"])
- quote_bal_str = _format_number(balances["quote_balance"])
- lines.append(f"š° {base_symbol}: {base_bal_str}")
- lines.append(f"šµ {quote_symbol}: {quote_bal_str}")
- message += escape_markdown_v2("\n".join(lines)) + "\n"
-
context.user_data["token_balances"] = balances
except Exception as e:
logger.warning(f"Could not fetch token balances: {e}")
@@ -1173,8 +1188,22 @@ async def _show_pool_detail(
context.user_data["selected_pool_info"] = pool_info
context.user_data["dex_state"] = "add_position"
+ # ========== WALLET BALANCES ==========
+ base_bal = balances.get("base_balance", 0)
+ quote_bal = balances.get("quote_balance", 0)
+ base_val = balances.get("base_value", 0)
+ quote_val = balances.get("quote_value", 0)
+
+ message += "\n" + escape_markdown_v2("─── Wallet Balances ───") + "\n"
+ base_bal_str = _format_number(base_bal)
+ base_val_str = f"${_format_number(base_val)}" if base_val > 0 else ""
+ message += f"š° `{escape_markdown_v2(base_symbol)}`: `{escape_markdown_v2(base_bal_str)}` {escape_markdown_v2(base_val_str)}\n"
+
+ quote_bal_str = _format_number(quote_bal)
+ quote_val_str = f"${_format_number(quote_val)}" if quote_val > 0 else ""
+ message += f"šµ `{escape_markdown_v2(quote_symbol)}`: `{escape_markdown_v2(quote_bal_str)}` {escape_markdown_v2(quote_val_str)}\n"
+
# ========== POSITION PREVIEW ==========
- # Show preview of the position to be created
message += "\n" + escape_markdown_v2("─── Position Preview ───") + "\n"
# Get amounts and calculate actual values
@@ -1184,7 +1213,7 @@ async def _show_pool_detail(
try:
if base_amount_str.endswith('%'):
base_pct_val = float(base_amount_str[:-1])
- base_amount = balances.get("base_balance", 0) * base_pct_val / 100
+ base_amount = base_bal * base_pct_val / 100
else:
base_amount = float(base_amount_str) if base_amount_str else 0
except (ValueError, TypeError):
@@ -1193,7 +1222,7 @@ async def _show_pool_detail(
try:
if quote_amount_str.endswith('%'):
quote_pct_val = float(quote_amount_str[:-1])
- quote_amount = balances.get("quote_balance", 0) * quote_pct_val / 100
+ quote_amount = quote_bal * quote_pct_val / 100
else:
quote_amount = float(quote_amount_str) if quote_amount_str else 0
except (ValueError, TypeError):
@@ -1228,8 +1257,8 @@ async def _show_pool_detail(
# Show range with percentages
l_pct_str = f"({lower_pct_preview:+.1f}%)" if lower_pct_preview is not None else ""
u_pct_str = f"({upper_pct_preview:+.1f}%)" if upper_pct_preview is not None else ""
- message += f"š *L:* `{escape_markdown_v2(l_str)}` _{escape_markdown_v2(l_pct_str)}_\n"
- message += f"š *U:* `{escape_markdown_v2(u_str)}` _{escape_markdown_v2(u_pct_str)}_\n"
+ message += f"š *Lower:* `{escape_markdown_v2(l_str)}` _{escape_markdown_v2(l_pct_str)}_\n"
+ message += f"š *Upper:* `{escape_markdown_v2(u_str)}` _{escape_markdown_v2(u_pct_str)}_\n"
# Validate bin range and show
if current_price and bin_step:
@@ -1243,42 +1272,68 @@ async def _show_pool_detail(
except (ValueError, TypeError):
pass
- # Show calculated amounts
- message += f"š° *{escape_markdown_v2(base_symbol)}:* `{escape_markdown_v2(_format_number(base_amount))}` _\\({escape_markdown_v2(base_amount_str)}\\)_\n"
- message += f"šµ *{escape_markdown_v2(quote_symbol)}:* `{escape_markdown_v2(_format_number(quote_amount))}` _\\({escape_markdown_v2(quote_amount_str)}\\)_\n"
+ # Show calculated amounts with $ values
+ base_amt_fmt = escape_markdown_v2(_format_number(base_amount))
+ quote_amt_fmt = escape_markdown_v2(_format_number(quote_amount))
+
+ # Calculate $ values for amounts
+ try:
+ price_float = float(current_price) if current_price else 0
+ base_usd = base_amount * price_float # base token value in quote (usually USD)
+ quote_usd = quote_amount # quote is usually USDC/USDT
+ except (ValueError, TypeError):
+ base_usd, quote_usd = 0, 0
- # Build add position display values - show percentages in buttons
+ base_usd_str = f" _${escape_markdown_v2(_format_number(base_usd))}_" if base_usd > 0 else ""
+ quote_usd_str = f" _${escape_markdown_v2(_format_number(quote_usd))}_" if quote_usd > 0 else ""
+
+ message += f"š° *Add:* `{base_amt_fmt}` {escape_markdown_v2(base_symbol)}{base_usd_str}\n"
+ message += f"šµ *Add:* `{quote_amt_fmt}` {escape_markdown_v2(quote_symbol)}{quote_usd_str}\n"
+
+ # Build edit template - each field on its own line
lower_pct = params.get('lower_pct')
upper_pct = params.get('upper_pct')
- lower_display = f"{lower_pct:.1f}%" if lower_pct is not None else (params.get('lower_price', 'ā')[:8] if params.get('lower_price') else 'ā')
- upper_display = f"+{upper_pct:.1f}%" if upper_pct is not None and upper_pct >= 0 else (f"{upper_pct:.1f}%" if upper_pct is not None else (params.get('upper_price', 'ā')[:8] if params.get('upper_price') else 'ā'))
- base_display = params.get('amount_base') or '10%'
- quote_display = params.get('amount_quote') or '10%'
+ l_pct_edit = f"{lower_pct:.1f}%" if lower_pct is not None else "-5%"
+ # For upper, don't show + prefix (parser treats unsigned as positive)
+ u_pct_edit = f"{upper_pct:.1f}%" if upper_pct is not None else "5%"
+ base_edit = params.get('amount_base') or '10%'
+ quote_edit = params.get('amount_quote') or '10%'
+
+ message += f"\n_To edit, send:_\n"
+ message += f"`L:{escape_markdown_v2(l_pct_edit)}`\n"
+ message += f"`U:{escape_markdown_v2(u_pct_edit)}`\n"
+ message += f"`B:{escape_markdown_v2(base_edit)}`\n"
+ message += f"`Q:{escape_markdown_v2(quote_edit)}`\n"
+
strategy_display = params.get('strategy_type', '0')
strategy_names = {'0': 'Spot', '1': 'Curve', '2': 'BidAsk'}
strategy_name = strategy_names.get(strategy_display, 'Spot')
- # Build keyboard with inline add liquidity controls
+ # Build GeckoTerminal link for pool
+ gecko_url = f"https://www.geckoterminal.com/solana/pools/{pool_address}"
+
+ # Timeframe buttons - highlight current
+ tf_1m = "⢠1m ā¢" if timeframe == "1m" else "1m"
+ tf_1h = "⢠1h ā¢" if timeframe == "1h" else "1h"
+ tf_1d = "⢠1d ā¢" if timeframe == "1d" else "1d"
+
+ # Build keyboard - simplified without L/U/B/Q buttons (combined chart is default)
keyboard = [
- # Row 1: Price range
- [
- InlineKeyboardButton(f"š L: {lower_display}", callback_data="dex:pos_set_lower"),
- InlineKeyboardButton(f"š U: {upper_display}", callback_data="dex:pos_set_upper"),
- ],
- # Row 2: Amounts
+ # Row 1: Timeframe selection
[
- InlineKeyboardButton(f"š° {base_symbol}: {base_display}", callback_data="dex:pos_set_base"),
- InlineKeyboardButton(f"šµ {quote_symbol}: {quote_display}", callback_data="dex:pos_set_quote"),
+ InlineKeyboardButton(tf_1m, callback_data="dex:pool_tf:1m"),
+ InlineKeyboardButton(tf_1h, callback_data="dex:pool_tf:1h"),
+ InlineKeyboardButton(tf_1d, callback_data="dex:pool_tf:1d"),
],
- # Row 3: Strategy + Add Position
+ # Row 2: Strategy + Add Position
[
InlineKeyboardButton(f"šÆ {strategy_name}", callback_data="dex:pos_toggle_strategy"),
InlineKeyboardButton("ā Add Position", callback_data="dex:pos_add_confirm"),
],
- # Row 4: Candles + Refresh
+ # Row 3: Refresh + Gecko link
[
- InlineKeyboardButton("š Candles", callback_data="dex:pool_ohlcv:1h"),
InlineKeyboardButton("š Refresh", callback_data="dex:pool_detail_refresh"),
+ InlineKeyboardButton("š¦ Gecko", url=gecko_url),
],
]
@@ -1300,40 +1355,69 @@ async def _show_pool_detail(
reply_markup = InlineKeyboardMarkup(keyboard)
- # Generate liquidity chart if bins available
+ # Generate combined OHLCV + Liquidity chart (OHLCV already fetched in parallel above)
chart_bytes = None
- if bins:
+ price_float = float(current_price) if current_price else None
+
+ # Parse lower/upper prices for range visualization
+ lower_float = None
+ upper_float = None
+ try:
+ if params.get('lower_price'):
+ lower_float = float(params['lower_price'])
+ if params.get('upper_price'):
+ upper_float = float(params['upper_price'])
+ except (ValueError, TypeError):
+ pass
+
+ # Generate combined chart if we have OHLCV or bins
+ if ohlcv_data or bins:
try:
- price_float = float(current_price) if current_price else None
- # Parse lower/upper prices for range visualization
- lower_float = None
- upper_float = None
- try:
- if params.get('lower_price'):
- lower_float = float(params['lower_price'])
- if params.get('upper_price'):
- upper_float = float(params['upper_price'])
- except (ValueError, TypeError):
- pass
- chart_bytes = generate_liquidity_chart(
- bins=bins,
- active_bin_id=active_bin,
- current_price=price_float,
+ chart_bytes = generate_combined_chart(
+ ohlcv_data=ohlcv_data or [],
+ bins=bins or [],
pair_name=pair,
+ timeframe=timeframe,
+ current_price=price_float,
+ base_symbol=base_symbol,
+ quote_symbol=quote_symbol,
lower_price=lower_float,
upper_price=upper_float
)
except Exception as e:
- logger.warning(f"Failed to generate chart: {e}")
+ logger.warning(f"Failed to generate combined chart: {e}")
+ # Fallback to liquidity-only chart
+ if bins:
+ try:
+ chart_bytes = generate_liquidity_chart(
+ bins=bins,
+ active_bin_id=active_bin,
+ current_price=price_float,
+ pair_name=pair,
+ lower_price=lower_float,
+ upper_price=upper_float
+ )
+ except Exception as e2:
+ logger.warning(f"Failed to generate fallback chart: {e2}")
# Determine chat for sending
if from_callback:
chat = update.callback_query.message.chat
- # Delete the previous message
- try:
- await update.callback_query.message.delete()
- except Exception:
- pass
+
+ # Check for loading message from timeframe switch and delete it
+ loading_msg_id = context.user_data.pop("_loading_msg_id", None)
+ loading_chat_id = context.user_data.pop("_loading_chat_id", None)
+ if loading_msg_id and loading_chat_id:
+ try:
+ await update.get_bot().delete_message(chat_id=loading_chat_id, message_id=loading_msg_id)
+ except Exception:
+ pass
+ else:
+ # Delete the previous message (normal case, not timeframe switch)
+ try:
+ await update.callback_query.message.delete()
+ except Exception:
+ pass
else:
chat = update.message.chat
# Delete user input and original message
@@ -1352,8 +1436,13 @@ async def _show_pool_detail(
# Send chart as photo with caption, or just text if no chart
if chart_bytes:
try:
- photo_file = BytesIO(chart_bytes)
- photo_file.name = "liquidity_distribution.png"
+ # Handle both BytesIO objects and raw bytes
+ if isinstance(chart_bytes, BytesIO):
+ photo_file = chart_bytes
+ photo_file.seek(0) # Ensure we're at the start
+ else:
+ photo_file = BytesIO(chart_bytes)
+ photo_file.name = "ohlcv_liquidity.png"
sent_msg = await chat.send_photo(
photo=photo_file,
@@ -1418,28 +1507,63 @@ async def handle_pool_select(update: Update, context: ContextTypes.DEFAULT_TYPE,
await query.answer("Pool not found. Please search again.")
-async def handle_pool_detail_refresh(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
- """Refresh pool detail by clearing cache and re-fetching"""
+async def handle_pool_detail_refresh(update: Update, context: ContextTypes.DEFAULT_TYPE, timeframe: str = None) -> None:
+ """Refresh pool detail by clearing cache and re-fetching
+
+ Args:
+ timeframe: Optional OHLCV timeframe (1m, 1h, 1d). If None, uses default 1h.
+ """
from ._shared import clear_cache
+ query = update.callback_query
selected_pool = context.user_data.get("selected_pool", {})
if not selected_pool:
- await update.callback_query.answer("No pool selected")
+ await query.answer("No pool selected")
return
pool_address = selected_pool.get('pool_address', selected_pool.get('address', ''))
connector = selected_pool.get('connector', 'meteora')
- # Clear pool info cache
- cache_key = f"pool_info_{connector}_{pool_address}"
- clear_cache(context.user_data, cache_key)
- context.user_data.pop("selected_pool_info", None)
+ # Only clear cache on full refresh (no timeframe switch)
+ if timeframe is None:
+ # Clear pool info cache
+ cache_key = f"pool_info_{connector}_{pool_address}"
+ clear_cache(context.user_data, cache_key)
+ context.user_data.pop("selected_pool_info", None)
- # Also clear add_position_params to get fresh range calculation
- context.user_data.pop("add_position_params", None)
+ # Also clear add_position_params to get fresh range calculation
+ context.user_data.pop("add_position_params", None)
+ await query.answer("Refreshing...")
+ timeframe = "1h" # Default timeframe
+ else:
+ # Timeframe switch - show loading transition
+ await query.answer(f"Loading {timeframe} candles...")
- await update.callback_query.answer("Refreshing...")
- await _show_pool_detail(update, context, selected_pool, from_callback=True)
+ # Show loading state - need to handle both photo and text messages
+ pair = selected_pool.get('trading_pair', selected_pool.get('name', 'Pool'))
+ loading_msg = rf"ā³ *Loading {timeframe} candles\.\.\.*" + "\n\n"
+ loading_msg += f"š *Pool:* `{escape_markdown_v2(pair)}`\n"
+ loading_msg += r"_Fetching OHLCV data\.\.\._"
+
+ loading_keyboard = [[InlineKeyboardButton("ā³ Loading...", callback_data="noop")]]
+
+ try:
+ # Try to delete current message (works for both photo and text)
+ await query.message.delete()
+ except Exception:
+ pass
+
+ # Send loading message
+ loading_sent = await query.message.chat.send_message(
+ loading_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(loading_keyboard)
+ )
+ # Store loading message ID for deletion
+ context.user_data["_loading_msg_id"] = loading_sent.message_id
+ context.user_data["_loading_chat_id"] = loading_sent.chat.id
+
+ await _show_pool_detail(update, context, selected_pool, from_callback=True, timeframe=timeframe)
async def handle_add_to_gateway(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
@@ -1673,7 +1797,7 @@ async def handle_pool_ohlcv(update: Update, context: ContextTypes.DEFAULT_TYPE,
pool_address=pool_address,
network=network,
timeframe=timeframe,
- currency=currency,
+ currency="token",
user_data=context.user_data
)
@@ -1822,7 +1946,7 @@ async def handle_pool_combined_chart(update: Update, context: ContextTypes.DEFAU
pool_address=pool_address,
network=network,
timeframe=timeframe,
- currency=currency,
+ currency="token",
user_data=context.user_data
)
@@ -1841,13 +1965,27 @@ async def handle_pool_combined_chart(update: Update, context: ContextTypes.DEFAU
await loading_msg.edit_text("ā No data available for this pool")
return
+ # Get lower/upper prices from add_position_params if available
+ params = context.user_data.get("add_position_params", {})
+ lower_price = None
+ upper_price = None
+ try:
+ if params.get('lower_price'):
+ lower_price = float(params['lower_price'])
+ if params.get('upper_price'):
+ upper_price = float(params['upper_price'])
+ except (ValueError, TypeError):
+ pass
+
# Generate combined chart
chart_buf = generate_combined_chart(
ohlcv_data=ohlcv_data or [],
bins=bins or [],
pair_name=pair,
timeframe=_format_timeframe_label(timeframe),
- current_price=float(current_price) if current_price else None
+ current_price=float(current_price) if current_price else None,
+ lower_price=lower_price,
+ upper_price=upper_price
)
if not chart_buf:
@@ -2300,23 +2438,148 @@ def is_active_position(pos):
)
-async def handle_pos_view(update: Update, context: ContextTypes.DEFAULT_TYPE, pos_index: str) -> None:
- """View detailed info about a position"""
+async def handle_pos_view(update: Update, context: ContextTypes.DEFAULT_TYPE, pos_index: str, timeframe: str = "1h") -> None:
+ """View detailed info about a position with combined OHLCV + liquidity chart
+
+ Args:
+ update: Telegram update
+ context: Bot context
+ pos_index: Position index in cache
+ timeframe: OHLCV timeframe (1m, 1h, 1d) - default 1h
+ """
+ import asyncio
+ from io import BytesIO
+
+ query = update.callback_query
+
try:
positions_cache = context.user_data.get("positions_cache", {})
pos = positions_cache.get(pos_index)
if not pos:
- await update.callback_query.answer("Position not found. Please refresh.")
+ await query.answer("Position not found. Please refresh.")
return
- # Get token cache (fetch if not available)
+ await query.answer()
+
+ # Get basic position info for loading message
+ base_token = pos.get('base_token', pos.get('token_a', ''))
+ quote_token = pos.get('quote_token', pos.get('token_b', ''))
+ token_cache = context.user_data.get("token_cache", {})
+ from utils.telegram_formatters import resolve_token_symbol
+ base_symbol = resolve_token_symbol(base_token, token_cache) if base_token else 'BASE'
+ quote_symbol = resolve_token_symbol(quote_token, token_cache) if quote_token else 'QUOTE'
+ pair = f"{base_symbol}/{quote_symbol}"
+
+ # Immediately show loading state
+ loading_msg = f"š *Position: {escape_markdown_v2(pair)}*\n\n"
+ loading_msg += r"ā³ _Gathering information\.\.\._" + "\n"
+ loading_msg += r"⢠Fetching OHLCV data" + "\n"
+ loading_msg += r"⢠Loading liquidity distribution" + "\n"
+ loading_msg += r"⢠Generating chart"
+
+ loading_keyboard = [[InlineKeyboardButton("ā³ Loading...", callback_data="noop")]]
+
+ # Handle loading state for both photo and text messages
+ loading_msg_id = None
+ if query.message.photo:
+ # For photo messages, delete and send new text message
+ try:
+ await query.message.delete()
+ except Exception:
+ pass
+ loading_sent = await query.message.chat.send_message(
+ loading_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(loading_keyboard)
+ )
+ loading_msg_id = loading_sent.message_id
+ else:
+ # For text messages, just edit
+ try:
+ await query.message.edit_text(
+ loading_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(loading_keyboard)
+ )
+ except Exception:
+ pass
+
chat_id = update.effective_chat.id
- token_cache = context.user_data.get("token_cache")
- if not token_cache:
- token_cache = await get_token_cache_from_gateway(chat_id=chat_id)
+ connector = pos.get('connector', 'meteora')
+ pool_address = pos.get('pool_address', '')
+ network = 'solana-mainnet-beta'
+
+ # Get position price data
+ lower_price = pos.get('lower_price', pos.get('price_lower'))
+ upper_price = pos.get('upper_price', pos.get('price_upper'))
+ pnl_summary = pos.get('pnl_summary', {})
+ entry_price = pnl_summary.get('entry_price')
+ current_price = pnl_summary.get('current_price')
+
+ # Parallel fetch: token_cache (if needed), pool_info (bins), and OHLCV
+ gecko_network = get_gecko_network(network)
+
+ async def fetch_token_cache_task():
+ cached = context.user_data.get("token_cache")
+ if cached is not None:
+ return cached
+ return await get_token_cache_from_gateway(chat_id=chat_id)
+
+ async def fetch_pool_info_task():
+ if not pool_address:
+ return {}
+ cache_key = f"pool_info_{connector}_{pool_address}"
+ cached = get_cached(context.user_data, cache_key, ttl=DEFAULT_CACHE_TTL)
+ if cached:
+ return cached
+ client = await get_client(chat_id)
+ info = await _fetch_pool_info(client, pool_address, connector)
+ if info:
+ set_cached(context.user_data, cache_key, info)
+ return info or {}
+
+ async def fetch_ohlcv_task():
+ if not pool_address:
+ return []
+ try:
+ data, error = await fetch_ohlcv(
+ pool_address=pool_address,
+ network=gecko_network,
+ timeframe=timeframe,
+ currency="token",
+ user_data=context.user_data
+ )
+ return data or []
+ except Exception as e:
+ logger.warning(f"Error fetching OHLCV for position: {e}")
+ return []
+
+ # Run all fetches in parallel
+ token_cache_new, pool_info, ohlcv_data = await asyncio.gather(
+ fetch_token_cache_task(),
+ fetch_pool_info_task(),
+ fetch_ohlcv_task()
+ )
+
+ # Update token cache if fetched
+ if token_cache_new:
+ token_cache = token_cache_new
context.user_data["token_cache"] = token_cache
+ # Re-resolve symbols with fresh token cache if needed
+ if token_cache_new and token_cache_new != context.user_data.get("token_cache"):
+ base_symbol = resolve_token_symbol(base_token, token_cache) if base_token else 'BASE'
+ quote_symbol = resolve_token_symbol(quote_token, token_cache) if quote_token else 'QUOTE'
+ pair = f"{base_symbol}/{quote_symbol}"
+
+ # Get bins from pool_info
+ bins = pool_info.get('bins', [])
+
+ # Get current price from pool_info if not in pnl_summary
+ if not current_price:
+ current_price = pool_info.get('price')
+
# Get token prices for USD conversion
token_prices = context.user_data.get("token_prices", {})
@@ -2326,42 +2589,134 @@ async def handle_pos_view(update: Update, context: ContextTypes.DEFAULT_TYPE, po
message += escape_markdown_v2(detail)
# Build keyboard with actions
- connector = pos.get('connector', '')
- pool_address = pos.get('pool_address', '')
dex_url = get_dex_pool_url(connector, pool_address)
+ # Timeframe buttons - highlight current
+ tf_1m = "⢠1m ā¢" if timeframe == "1m" else "1m"
+ tf_1h = "⢠1h ā¢" if timeframe == "1h" else "1h"
+ tf_1d = "⢠1d ā¢" if timeframe == "1d" else "1d"
+
keyboard = [
+ # Row 1: Timeframe selection
+ [
+ InlineKeyboardButton(tf_1m, callback_data=f"dex:pos_view_tf:{pos_index}:1m"),
+ InlineKeyboardButton(tf_1h, callback_data=f"dex:pos_view_tf:{pos_index}:1h"),
+ InlineKeyboardButton(tf_1d, callback_data=f"dex:pos_view_tf:{pos_index}:1d"),
+ ],
+ # Row 2: Actions
[
InlineKeyboardButton("š° Collect Fees", callback_data=f"dex:pos_collect:{pos_index}"),
- InlineKeyboardButton("ā Close Position", callback_data=f"dex:pos_close:{pos_index}")
+ InlineKeyboardButton("ā Close", callback_data=f"dex:pos_close:{pos_index}")
],
]
- # Add View Pool button to see pool details with chart
- if pool_address and connector:
+ # Add DEX link if available
+ if dex_url:
keyboard.append([
- InlineKeyboardButton("š View Pool", callback_data=f"dex:pos_view_pool:{pos_index}"),
- InlineKeyboardButton("š Refresh", callback_data=f"dex:pos_view:{pos_index}")
+ InlineKeyboardButton(f"š {connector.title()}", url=dex_url),
+ InlineKeyboardButton("Ā« Back", callback_data="dex:liquidity")
])
else:
- keyboard.append([InlineKeyboardButton("š Refresh", callback_data=f"dex:pos_view:{pos_index}")])
+ keyboard.append([InlineKeyboardButton("Ā« Back", callback_data="dex:liquidity")])
- # Add DEX link if available
- if dex_url:
- keyboard.append([InlineKeyboardButton(f"š View on {connector.title()}", url=dex_url)])
-
- keyboard.append([InlineKeyboardButton("Ā« Back", callback_data="dex:liquidity")])
reply_markup = InlineKeyboardMarkup(keyboard)
- await update.callback_query.message.edit_text(
- message,
- parse_mode="MarkdownV2",
- reply_markup=reply_markup
- )
+ # Generate combined chart with entry price, lower/upper bounds
+ chart_bytes = None
+ if ohlcv_data or bins:
+ try:
+ # Convert price strings to floats
+ lower_float = float(lower_price) if lower_price else None
+ upper_float = float(upper_price) if upper_price else None
+ entry_float = float(entry_price) if entry_price else None
+ current_float = float(current_price) if current_price else None
+
+ chart_bytes = generate_combined_chart(
+ ohlcv_data=ohlcv_data or [],
+ bins=bins or [],
+ pair_name=pair,
+ timeframe=timeframe,
+ current_price=current_float,
+ base_symbol=base_symbol,
+ quote_symbol=quote_symbol,
+ lower_price=lower_float,
+ upper_price=upper_float,
+ entry_price=entry_float
+ )
+ except Exception as e:
+ logger.warning(f"Failed to generate position chart: {e}")
+
+ # Send as photo with caption, or edit text if no chart
+ if chart_bytes:
+ try:
+ # Handle both BytesIO and raw bytes
+ if isinstance(chart_bytes, BytesIO):
+ photo_file = chart_bytes
+ photo_file.seek(0)
+ else:
+ photo_file = BytesIO(chart_bytes)
+ photo_file.name = "position_chart.png"
+
+ # Delete loading message (either the text loading message we sent or original message)
+ if loading_msg_id:
+ # Delete the loading text message we sent
+ try:
+ await query.message.chat.delete_message(loading_msg_id)
+ except Exception:
+ pass
+ else:
+ # Delete the original message (text message case)
+ try:
+ await query.message.delete()
+ except Exception:
+ pass
+
+ await query.message.chat.send_photo(
+ photo=photo_file,
+ caption=message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+ except Exception as e:
+ logger.warning(f"Failed to send position chart: {e}")
+ # Fallback to text
+ await query.message.chat.send_message(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+ else:
+ # No chart available, just update or send message with details
+ if loading_msg_id:
+ # Delete loading message and send new
+ try:
+ await query.message.chat.delete_message(loading_msg_id)
+ except Exception:
+ pass
+ await query.message.chat.send_message(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+ else:
+ # Try to edit the text message
+ try:
+ await query.message.edit_text(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
+ except Exception:
+ # Message was deleted, send new one
+ await query.message.chat.send_message(
+ message,
+ parse_mode="MarkdownV2",
+ reply_markup=reply_markup
+ )
except Exception as e:
logger.error(f"Error viewing position: {e}", exc_info=True)
- await update.callback_query.answer(f"Error: {str(e)[:100]}")
+ await query.answer(f"Error: {str(e)[:100]}")
async def handle_pos_view_pool(update: Update, context: ContextTypes.DEFAULT_TYPE, pos_index: str) -> None:
@@ -3032,7 +3387,8 @@ async def show_add_position_menu(
update: Update,
context: ContextTypes.DEFAULT_TYPE,
send_new: bool = False,
- show_help: bool = False
+ show_help: bool = False,
+ timeframe: str = "1h"
) -> None:
"""Display the add position configuration menu with liquidity chart
@@ -3041,6 +3397,7 @@ async def show_add_position_menu(
context: The context object
send_new: If True, always send a new message instead of editing
show_help: If True, show detailed help instead of balances/ASCII
+ timeframe: OHLCV timeframe (1m, 1h, 1d) - default 1h
"""
from io import BytesIO
@@ -3239,6 +3596,10 @@ async def show_add_position_menu(
help_text += f"š± *Price:* `{escape_markdown_v2(str(current_price)[:10])}`\n"
if bin_step:
help_text += f"š *Bin Step:* `{escape_markdown_v2(str(bin_step))}` _\\(default 20 bins each side\\)_\n"
+ # Add fee if available
+ base_fee = pool_info.get('base_fee_percentage') or selected_pool.get('base_fee_percentage')
+ if base_fee:
+ help_text += f"šø *Fee:* `{escape_markdown_v2(str(base_fee))}%`\n"
# Fetch and display token balances (cached with 60s TTL)
try:
@@ -3318,10 +3679,10 @@ async def show_add_position_menu(
u_str = f"{upper_p:.6f}"
# Show range with percentages
- l_pct_str = f"{lower_pct:+.1f}%" if lower_pct is not None else ""
- u_pct_str = f"{upper_pct:+.1f}%" if upper_pct is not None else ""
- help_text += f"š *L:* `{escape_markdown_v2(l_str)}` _{escape_markdown_v2(l_pct_str)}_\n"
- help_text += f"š *U:* `{escape_markdown_v2(u_str)}` _{escape_markdown_v2(u_pct_str)}_\n"
+ l_pct_str = f"({lower_pct:+.1f}%)" if lower_pct is not None else ""
+ u_pct_str = f"({upper_pct:+.1f}%)" if upper_pct is not None else ""
+ help_text += f"š *Lower:* `{escape_markdown_v2(l_str)}` _{escape_markdown_v2(l_pct_str)}_\n"
+ help_text += f"š *Upper:* `{escape_markdown_v2(u_str)}` _{escape_markdown_v2(u_pct_str)}_\n"
# Validate bin range
if current_price and bin_step:
@@ -3335,86 +3696,123 @@ async def show_add_position_menu(
except (ValueError, TypeError):
pass
- # Show amounts
- help_text += f"š° *{escape_markdown_v2(base_symbol)}:* `{escape_markdown_v2(_format_number(base_amount))}`\n"
- help_text += f"šµ *{escape_markdown_v2(quote_symbol)}:* `{escape_markdown_v2(_format_number(quote_amount))}`\n"
+ # Show amounts with $ values
+ base_amt_fmt = escape_markdown_v2(_format_number(base_amount))
+ quote_amt_fmt = escape_markdown_v2(_format_number(quote_amount))
+
+ # Calculate $ values for amounts
+ try:
+ price_float = float(current_price) if current_price else 0
+ base_usd = base_amount * price_float # base token value in quote (usually USD)
+ quote_usd = quote_amount # quote is usually USDC/USDT
+ except (ValueError, TypeError):
+ base_usd, quote_usd = 0, 0
+
+ base_usd_str = f" _${escape_markdown_v2(_format_number(base_usd))}_" if base_usd > 0 else ""
+ quote_usd_str = f" _${escape_markdown_v2(_format_number(quote_usd))}_" if quote_usd > 0 else ""
+
+ help_text += f"š° *Add:* `{base_amt_fmt}` {escape_markdown_v2(base_symbol)}{base_usd_str}\n"
+ help_text += f"šµ *Add:* `{quote_amt_fmt}` {escape_markdown_v2(quote_symbol)}{quote_usd_str}\n"
+
+ # Add edit template - each field on its own line
+ l_pct_edit = f"{lower_pct:.1f}%" if lower_pct is not None else "-5%"
+ # For upper, don't show + prefix (parser treats unsigned as positive)
+ u_pct_edit = f"{upper_pct:.1f}%" if upper_pct is not None else "5%"
+ help_text += f"\n_To edit, send:_\n"
+ help_text += f"`L:{escape_markdown_v2(l_pct_edit)}`\n"
+ help_text += f"`U:{escape_markdown_v2(u_pct_edit)}`\n"
+ help_text += f"`B:{escape_markdown_v2(base_amount_str)}`\n"
+ help_text += f"`Q:{escape_markdown_v2(quote_amount_str)}`\n"
# NOTE: ASCII visualization is added AFTER we know if chart image is available
# This is done below, after chart_bytes is generated
- # Build keyboard - show percentages in buttons for L/U
- lower_pct = params.get('lower_pct')
- upper_pct = params.get('upper_pct')
- lower_display = f"{lower_pct:.1f}%" if lower_pct is not None else (params.get('lower_price', 'ā')[:8] if params.get('lower_price') else 'ā')
- upper_display = f"+{upper_pct:.1f}%" if upper_pct is not None and upper_pct >= 0 else (f"{upper_pct:.1f}%" if upper_pct is not None else (params.get('upper_price', 'ā')[:8] if params.get('upper_price') else 'ā'))
- base_display = params.get('amount_base') or '10%'
- quote_display = params.get('amount_quote') or '10%'
+ # Build simplified keyboard (no L/U/B/Q buttons - use text input instead)
strategy_display = params.get('strategy_type', '0')
-
- # Strategy type name mapping
strategy_names = {'0': 'Spot', '1': 'Curve', '2': 'BidAsk'}
strategy_name = strategy_names.get(strategy_display, 'Spot')
+ # Build GeckoTerminal link for pool
+ gecko_url = f"https://www.geckoterminal.com/solana/pools/{pool_address}"
+
+ # Timeframe buttons - highlight current
+ tf_1m = "⢠1m ā¢" if timeframe == "1m" else "1m"
+ tf_1h = "⢠1h ā¢" if timeframe == "1h" else "1h"
+ tf_1d = "⢠1d ā¢" if timeframe == "1d" else "1d"
+
keyboard = [
+ # Row 1: Timeframe selection
[
- InlineKeyboardButton(
- f"š L: {lower_display}",
- callback_data="dex:pos_set_lower"
- ),
- InlineKeyboardButton(
- f"š U: {upper_display}",
- callback_data="dex:pos_set_upper"
- )
+ InlineKeyboardButton(tf_1m, callback_data="dex:pos_tf:1m"),
+ InlineKeyboardButton(tf_1h, callback_data="dex:pos_tf:1h"),
+ InlineKeyboardButton(tf_1d, callback_data="dex:pos_tf:1d"),
],
+ # Row 2: Strategy + Add Position
[
- InlineKeyboardButton(
- f"š° Base: {base_display}",
- callback_data="dex:pos_set_base"
- ),
- InlineKeyboardButton(
- f"šµ Quote: {quote_display}",
- callback_data="dex:pos_set_quote"
- )
+ InlineKeyboardButton(f"šÆ {strategy_name}", callback_data="dex:pos_toggle_strategy"),
+ InlineKeyboardButton("ā Add Position", callback_data="dex:pos_add_confirm"),
],
+ # Row 3: Refresh + Gecko + Back
[
- InlineKeyboardButton(
- f"šÆ Strategy: {strategy_name}",
- callback_data="dex:pos_toggle_strategy"
- )
+ InlineKeyboardButton("š Refresh", callback_data="dex:pos_refresh"),
+ InlineKeyboardButton("š¦ Gecko", url=gecko_url),
+ InlineKeyboardButton("Ā« Back", callback_data="dex:pool_detail_refresh"),
]
]
- # Help/Back toggle and action buttons
- help_button = (
- InlineKeyboardButton("Ā« Position", callback_data="dex:pool_detail_refresh")
- if show_help else
- InlineKeyboardButton("ā Help", callback_data="dex:pos_help")
- )
- keyboard.append([
- InlineKeyboardButton("ā Add Position", callback_data="dex:pos_add_confirm"),
- InlineKeyboardButton("š Refresh", callback_data="dex:pos_refresh"),
- help_button,
- ])
- keyboard.append([
- InlineKeyboardButton("Ā« Back to Pool", callback_data="dex:pool_detail_refresh")
- ])
-
reply_markup = InlineKeyboardMarkup(keyboard)
- # Generate chart image if bins available (only for main view, not help)
+ # Generate combined OHLCV + Liquidity chart (only for main view, not help)
chart_bytes = None
- if bins and not show_help:
+ if not show_help:
+ # Try to get cached OHLCV data for combined chart
try:
- chart_bytes = generate_liquidity_chart(
- bins=bins,
- active_bin_id=pool_info.get('active_bin_id'),
- current_price=current_val,
- pair_name=pair,
- lower_price=lower_val,
- upper_price=upper_val
- )
+ gecko_network = get_gecko_network(network)
+ ohlcv_cache_key = f"ohlcv_{gecko_network}_{pool_address}_{timeframe}_token"
+ ohlcv_data = get_cached(context.user_data, ohlcv_cache_key, ttl=300) # 5min cache
+
+ if ohlcv_data is None:
+ # Fetch OHLCV data
+ ohlcv_data, ohlcv_error = await fetch_ohlcv(
+ pool_address=pool_address,
+ network=gecko_network,
+ timeframe=timeframe,
+ currency="token",
+ user_data=context.user_data
+ )
+ if ohlcv_error:
+ logger.warning(f"OHLCV fetch error: {ohlcv_error}")
+ ohlcv_data = []
+
+ # Generate combined chart if we have OHLCV or bins
+ if ohlcv_data or bins:
+ chart_bytes = generate_combined_chart(
+ ohlcv_data=ohlcv_data or [],
+ bins=bins or [],
+ pair_name=pair,
+ timeframe=timeframe,
+ current_price=current_val,
+ base_symbol=base_symbol,
+ quote_symbol=quote_symbol,
+ lower_price=lower_val,
+ upper_price=upper_val
+ )
except Exception as e:
- logger.warning(f"Failed to generate chart for add position: {e}")
+ logger.warning(f"Failed to generate combined chart: {e}")
+
+ # Fallback to liquidity-only chart if combined chart failed
+ if not chart_bytes and bins:
+ try:
+ chart_bytes = generate_liquidity_chart(
+ bins=bins,
+ active_bin_id=pool_info.get('active_bin_id'),
+ current_price=current_val,
+ pair_name=pair,
+ lower_price=lower_val,
+ upper_price=upper_val
+ )
+ except Exception as e:
+ logger.warning(f"Failed to generate fallback chart: {e}")
# Add ASCII visualization ONLY if there's NO chart image (ASCII is fallback)
# This avoids caption too long error on Telegram (1024 char limit for photo captions)
@@ -3425,47 +3823,51 @@ async def show_add_position_menu(
help_text += r"_ā in range ā out ā current price āā your bounds_" + "\n"
# Determine how to send
+ # Check for loading message from timeframe switch
+ loading_msg_id = context.user_data.pop("_loading_msg_id", None)
+ loading_chat_id = context.user_data.pop("_loading_chat_id", None)
+
# Check if we have a stored menu message we can edit
stored_menu_msg_id = context.user_data.get("add_position_menu_msg_id")
stored_menu_chat_id = context.user_data.get("add_position_menu_chat_id")
stored_menu_is_photo = context.user_data.get("add_position_menu_is_photo", False)
- if send_new or not update.callback_query:
- chat = update.message.chat if update.message else update.callback_query.message.chat
+ if loading_msg_id or send_new or not update.callback_query:
+ # Loading message case, send_new, or text input - need to send new message
+ chat = update.callback_query.message.chat if update.callback_query else update.message.chat
- # Try to edit stored message if available (for text input updates)
- if stored_menu_msg_id and stored_menu_chat_id:
+ # Delete loading message if it exists
+ if loading_msg_id and loading_chat_id:
try:
- if stored_menu_is_photo:
- # Edit caption for photo message
- await update.get_bot().edit_message_caption(
- chat_id=stored_menu_chat_id,
- message_id=stored_menu_msg_id,
- caption=help_text,
- parse_mode="MarkdownV2",
- reply_markup=reply_markup
- )
- else:
- # Edit text for regular message
- await update.get_bot().edit_message_text(
- chat_id=stored_menu_chat_id,
- message_id=stored_menu_msg_id,
- text=help_text,
- parse_mode="MarkdownV2",
- reply_markup=reply_markup
- )
- return
- except Exception as e:
- if "not modified" not in str(e).lower():
- logger.debug(f"Could not edit stored menu, sending new: {e}")
- else:
- return
+ await update.get_bot().delete_message(
+ chat_id=loading_chat_id,
+ message_id=loading_msg_id
+ )
+ except Exception:
+ pass
+
+ # For text input updates with chart, delete old message and send new one
+ # (can't replace photo, only edit caption)
+ if stored_menu_msg_id and stored_menu_chat_id and not loading_msg_id:
+ # Delete old message to allow sending new one with updated chart
+ try:
+ await update.get_bot().delete_message(
+ chat_id=stored_menu_chat_id,
+ message_id=stored_menu_msg_id
+ )
+ except Exception:
+ pass # Message may already be deleted
# Send new message and store its ID
if chart_bytes:
try:
- photo_file = BytesIO(chart_bytes)
- photo_file.name = "liquidity.png"
+ # Handle both BytesIO objects and raw bytes
+ if isinstance(chart_bytes, BytesIO):
+ photo_file = chart_bytes
+ photo_file.seek(0) # Ensure we're at the start
+ else:
+ photo_file = BytesIO(chart_bytes)
+ photo_file.name = "ohlcv_liquidity.png"
sent_msg = await chat.send_photo(
photo=photo_file,
caption=help_text,
@@ -3548,10 +3950,16 @@ async def handle_pos_toggle_strategy(update: Update, context: ContextTypes.DEFAU
await show_add_position_menu(update, context)
-async def handle_pos_refresh(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
- """Refresh pool info and token balances by clearing cache"""
+async def handle_pos_refresh(update: Update, context: ContextTypes.DEFAULT_TYPE, timeframe: str = None) -> None:
+ """Refresh pool info and token balances by clearing cache
+
+ Args:
+ timeframe: Optional OHLCV timeframe (1m, 1h, 1d). If None, full refresh with default 1h.
+ """
from ._shared import clear_cache
+ query = update.callback_query
+
# Get current pool info to build cache keys
params = context.user_data.get("add_position_params", {})
selected_pool = context.user_data.get("selected_pool", {})
@@ -3562,26 +3970,57 @@ async def handle_pos_refresh(update: Update, context: ContextTypes.DEFAULT_TYPE)
base_token = selected_pool.get("base_token", "")
quote_token = selected_pool.get("quote_token", "")
- # Clear specific cache keys
- pool_cache_key = f"pool_info_{connector}_{pool_address}"
- balance_cache_key = f"token_balances_{network}_{base_token}_{quote_token}"
+ # Only clear cache and refetch on full refresh (no timeframe switch)
+ if timeframe is None:
+ # Clear specific cache keys
+ pool_cache_key = f"pool_info_{connector}_{pool_address}"
+ balance_cache_key = f"token_balances_{network}_{base_token}_{quote_token}"
- clear_cache(context.user_data, pool_cache_key)
- clear_cache(context.user_data, balance_cache_key)
+ clear_cache(context.user_data, pool_cache_key)
+ clear_cache(context.user_data, balance_cache_key)
- # Also clear stored pool info to force refresh
- context.user_data.pop("selected_pool_info", None)
+ # Also clear stored pool info to force refresh
+ context.user_data.pop("selected_pool_info", None)
- # Refetch pool info
- if pool_address:
- chat_id = update.effective_chat.id
- client = await get_client(chat_id)
- pool_info = await _fetch_pool_info(client, pool_address, connector)
- set_cached(context.user_data, pool_cache_key, pool_info)
- context.user_data["selected_pool_info"] = pool_info
+ # Refetch pool info
+ if pool_address:
+ chat_id = update.effective_chat.id
+ client = await get_client(chat_id)
+ pool_info = await _fetch_pool_info(client, pool_address, connector)
+ set_cached(context.user_data, pool_cache_key, pool_info)
+ context.user_data["selected_pool_info"] = pool_info
- await update.callback_query.answer("Refreshed!")
- await show_add_position_menu(update, context)
+ await query.answer("Refreshed!")
+ timeframe = "1h" # Default timeframe
+ else:
+ # Timeframe switch - show loading transition
+ await query.answer(f"Loading {timeframe} candles...")
+
+ # Show loading state - handle both photo and text messages
+ pair = selected_pool.get('trading_pair', selected_pool.get('name', 'Pool'))
+ loading_msg = rf"ā³ *Loading {timeframe} candles\.\.\.*" + "\n\n"
+ loading_msg += f"š *Pool:* `{escape_markdown_v2(pair)}`\n"
+ loading_msg += r"_Fetching OHLCV data\.\.\._"
+
+ loading_keyboard = [[InlineKeyboardButton("ā³ Loading...", callback_data="noop")]]
+
+ try:
+ # Delete current message (works for both photo and text)
+ await query.message.delete()
+ except Exception:
+ pass
+
+ # Send loading message
+ loading_sent = await query.message.chat.send_message(
+ loading_msg,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(loading_keyboard)
+ )
+ # Store loading message ID for deletion
+ context.user_data["_loading_msg_id"] = loading_sent.message_id
+ context.user_data["_loading_chat_id"] = loading_sent.chat.id
+
+ await show_add_position_menu(update, context, timeframe=timeframe)
async def handle_pos_use_max_range(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
@@ -3730,7 +4169,7 @@ async def handle_pos_set_base(update: Update, context: ContextTypes.DEFAULT_TYPE
# Get balance for display
balances = context.user_data.get("token_balances", {})
base_bal = balances.get("base_balance", 0)
- bal_info = f"_Balance: {_format_number(base_bal)}_\n\n" if base_bal > 0 else ""
+ bal_info = f"_Balance: {escape_markdown_v2(_format_number(base_bal))}_\n\n" if base_bal > 0 else ""
help_text = (
r"š *Set Base Token Amount*" + "\n\n" +
@@ -3759,7 +4198,7 @@ async def handle_pos_set_quote(update: Update, context: ContextTypes.DEFAULT_TYP
# Get balance for display
balances = context.user_data.get("token_balances", {})
quote_bal = balances.get("quote_balance", 0)
- bal_info = f"_Balance: {_format_number(quote_bal)}_\n\n" if quote_bal > 0 else ""
+ bal_info = f"_Balance: {escape_markdown_v2(_format_number(quote_bal))}_\n\n" if quote_bal > 0 else ""
help_text = (
r"š *Set Quote Token Amount*" + "\n\n" +
@@ -4020,8 +4459,17 @@ def _parse_multi_field_input(user_input: str, current_price: float = None) -> di
if ':' not in user_input:
return {}
- # Split by common separators: ' - ', '-', ',' or spaces
- parts = re.split(r'\s*[-,]\s*|\s+', user_input.strip())
+ # Use regex to find key:value patterns directly (handles negative numbers correctly)
+ # Pattern: word followed by : followed by value (which may start with - or +)
+ pattern = r'([a-zA-Z]+)\s*:\s*([+-]?[\d.]+%?|[\d.]+)'
+ matches = re.findall(pattern, user_input, re.IGNORECASE)
+
+ # Convert matches to parts format for compatibility with existing logic
+ parts = [f"{k}:{v}" for k, v in matches]
+
+ # If no regex matches, fall back to simple split (for backwards compatibility)
+ if not parts:
+ parts = re.split(r'\s*,\s*|\s+', user_input.strip())
for part in parts:
part = part.strip()
@@ -4148,14 +4596,14 @@ async def process_add_position(
if not hasattr(client, 'gateway_clmm'):
raise ValueError("Gateway CLMM not available")
- result = await client.gateway_clmm.add_liquidity(
+ result = await client.gateway_clmm.open_position(
connector=connector,
network=network,
pool_address=params["pool_address"],
lower_price=Decimal(params["lower_price"]),
upper_price=Decimal(params["upper_price"]),
- amount_base=Decimal(params["amount_base"]) if params["amount_base"] else None,
- amount_quote=Decimal(params["amount_quote"]) if params["amount_quote"] else None,
+ base_token_amount=Decimal(params["amount_base"]) if params["amount_base"] else None,
+ quote_token_amount=Decimal(params["amount_quote"]) if params["amount_quote"] else None,
)
if result is None:
diff --git a/handlers/dex/visualizations.py b/handlers/dex/visualizations.py
index 629e9f0..48c8464 100644
--- a/handlers/dex/visualizations.py
+++ b/handlers/dex/visualizations.py
@@ -563,7 +563,10 @@ def generate_combined_chart(
timeframe: str,
current_price: float = None,
base_symbol: str = None,
- quote_symbol: str = None
+ quote_symbol: str = None,
+ lower_price: float = None,
+ upper_price: float = None,
+ entry_price: float = None
) -> Optional[io.BytesIO]:
"""Generate combined OHLCV + Liquidity distribution chart
@@ -576,9 +579,12 @@ def generate_combined_chart(
bins: List of bin data with price, base_token_amount, quote_token_amount
pair_name: Trading pair name
timeframe: Timeframe string
- current_price: Current price for reference line
+ current_price: Current price for reference line (orange dashed)
base_symbol: Base token symbol
quote_symbol: Quote token symbol
+ lower_price: Lower bound of position range (blue dotted)
+ upper_price: Upper bound of position range (blue dotted)
+ entry_price: Entry price for existing position (green dashed)
Returns:
BytesIO buffer with PNG image or None if failed
@@ -592,7 +598,7 @@ def generate_combined_chart(
logger.warning("No OHLCV data for combined chart")
return None
- # Parse OHLCV data
+ # Parse OHLCV data - filter out empty candles (where all OHLC values are 0 or null)
times = []
opens = []
highs = []
@@ -605,6 +611,20 @@ def generate_combined_chart(
ts, o, h, l, c = candle[:5]
v = candle[5] if len(candle) > 5 else 0
+ # Parse OHLC values
+ try:
+ o_val = float(o) if o is not None else 0
+ h_val = float(h) if h is not None else 0
+ l_val = float(l) if l is not None else 0
+ c_val = float(c) if c is not None else 0
+ except (ValueError, TypeError):
+ continue
+
+ # Skip empty candles (all zeros or all same value with zero volume)
+ if o_val == 0 and h_val == 0 and l_val == 0 and c_val == 0:
+ continue
+
+ # Parse timestamp
if isinstance(ts, (int, float)):
times.append(datetime.fromtimestamp(ts))
elif hasattr(ts, 'to_pydatetime'):
@@ -617,15 +637,26 @@ def generate_combined_chart(
except Exception:
continue
- opens.append(float(o))
- highs.append(float(h))
- lows.append(float(l))
- closes.append(float(c))
+ opens.append(o_val)
+ highs.append(h_val)
+ lows.append(l_val)
+ closes.append(c_val)
volumes.append(float(v) if v else 0)
if not times:
raise ValueError("No valid OHLCV data")
+ # Limit to most recent candles to avoid overcrowding
+ # For 1m: 100 candles = ~1.5 hours, 1h: 100 = ~4 days, 1d: 100 = ~3 months
+ max_candles = 100
+ if len(times) > max_candles:
+ times = times[-max_candles:]
+ opens = opens[-max_candles:]
+ highs = highs[-max_candles:]
+ lows = lows[-max_candles:]
+ closes = closes[-max_candles:]
+ volumes = volumes[-max_candles:]
+
# Process bin data for liquidity
bin_data = []
if bins:
@@ -770,6 +801,69 @@ def generate_combined_chart(
row=1, col=2,
)
+ # Add lower price range line
+ if lower_price:
+ fig.add_hline(
+ y=lower_price,
+ line_dash="dot",
+ line_color="#3b82f6", # Blue
+ line_width=2,
+ row=1, col=1,
+ annotation_text=f"Lower: {lower_price:.6f}",
+ annotation_position="right",
+ annotation_font_color="#3b82f6",
+ )
+ if has_liquidity:
+ fig.add_hline(
+ y=lower_price,
+ line_dash="dot",
+ line_color="#3b82f6",
+ line_width=2,
+ row=1, col=2,
+ )
+
+ # Add upper price range line
+ if upper_price:
+ fig.add_hline(
+ y=upper_price,
+ line_dash="dot",
+ line_color="#3b82f6", # Blue
+ line_width=2,
+ row=1, col=1,
+ annotation_text=f"Upper: {upper_price:.6f}",
+ annotation_position="right",
+ annotation_font_color="#3b82f6",
+ )
+ if has_liquidity:
+ fig.add_hline(
+ y=upper_price,
+ line_dash="dot",
+ line_color="#3b82f6",
+ line_width=2,
+ row=1, col=2,
+ )
+
+ # Add entry price line (green dashed) - for viewing existing positions
+ if entry_price:
+ fig.add_hline(
+ y=entry_price,
+ line_dash="dash",
+ line_color="#22c55e", # Green
+ line_width=2,
+ row=1, col=1,
+ annotation_text=f"Entry: {entry_price:.6f}",
+ annotation_position="left",
+ annotation_font_color="#22c55e",
+ )
+ if has_liquidity:
+ fig.add_hline(
+ y=entry_price,
+ line_dash="dash",
+ line_color="#22c55e",
+ line_width=2,
+ row=1, col=2,
+ )
+
# Build title
if base_symbol and quote_symbol:
title = f"{base_symbol}/{quote_symbol} - {timeframe} + Liquidity"
@@ -810,13 +904,32 @@ def generate_combined_chart(
bargap=0.1, # Gap between bars
)
+ # Calculate expected interval for rangebreaks (to hide gaps in trading)
+ if len(times) >= 2:
+ # Calculate time delta between candles
+ time_deltas = [(times[i+1] - times[i]).total_seconds() for i in range(len(times)-1) if times[i+1] > times[i]]
+ if time_deltas:
+ # Use median delta as the expected interval
+ expected_interval = sorted(time_deltas)[len(time_deltas)//2]
+ # Set dvalue for rangebreaks (gaps > 2x expected interval)
+ gap_threshold = expected_interval * 2 * 1000 # Convert to ms
+ else:
+ gap_threshold = None
+ else:
+ gap_threshold = None
+
# Update axes styling (use unified theme)
- fig.update_xaxes(
+ # Add rangebreaks to hide gaps in trading data
+ xaxis_config = dict(
gridcolor=DARK_THEME["grid_color"],
color=DARK_THEME["axis_color"],
showgrid=True,
zeroline=False,
)
+ if gap_threshold:
+ xaxis_config["rangebreaks"] = [dict(dvalue=gap_threshold)]
+
+ fig.update_xaxes(**xaxis_config)
fig.update_yaxes(
gridcolor=DARK_THEME["grid_color"],
color=DARK_THEME["axis_color"],
diff --git a/handlers/routines/__init__.py b/handlers/routines/__init__.py
new file mode 100644
index 0000000..e3146c8
--- /dev/null
+++ b/handlers/routines/__init__.py
@@ -0,0 +1,940 @@
+"""
+Routines Handler - Run configurable Python scripts via Telegram.
+
+Features:
+- Auto-discovery of routines from routines/ folder
+- Text-based config editing (key=value)
+- Interval routines: run repeatedly at configurable interval
+- One-shot routines: run once (foreground or background)
+- Multi-instance support for different configs
+"""
+
+import asyncio
+import hashlib
+import logging
+import time
+
+from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup
+from telegram.ext import ContextTypes, CallbackContext
+
+from handlers import clear_all_input_states
+from routines.base import discover_routines, get_routine
+from utils.auth import restricted
+from utils.telegram_formatters import escape_markdown_v2
+
+logger = logging.getLogger(__name__)
+
+# Job metadata: {job_name: {start_time, config, routine_name}}
+_job_info: dict[str, dict] = {}
+
+# Last results: {chat_id: {key: {result, duration, end_time}}}
+_last_results: dict[int, dict[str, dict]] = {}
+
+
+# =============================================================================
+# Utility Functions
+# =============================================================================
+
+
+def _generate_instance_id(routine_name: str, config_dict: dict) -> str:
+ """Generate unique instance ID from routine name and config."""
+ data = f"{routine_name}:{sorted(config_dict.items())}"
+ return hashlib.md5(data.encode()).hexdigest()[:8]
+
+
+def _job_name(chat_id: int, routine_name: str, instance_id: str) -> str:
+ """Build job name for JobQueue."""
+ return f"routine_{chat_id}_{routine_name}_{instance_id}"
+
+
+def _display_name(name: str) -> str:
+ """Convert snake_case to Title Case."""
+ return name.replace("_", " ").title()
+
+
+def _format_duration(seconds: float) -> str:
+ """Format seconds as human-readable duration."""
+ if seconds < 1:
+ return f"{seconds * 1000:.0f}ms"
+ if seconds < 60:
+ return f"{seconds:.1f}s"
+ if seconds < 3600:
+ return f"{int(seconds // 60)}m {int(seconds % 60)}s"
+ return f"{int(seconds // 3600)}h {int((seconds % 3600) // 60)}m"
+
+
+# =============================================================================
+# Instance Management
+# =============================================================================
+
+
+def _get_instances(
+ context: ContextTypes.DEFAULT_TYPE,
+ chat_id: int,
+ routine_name: str | None = None,
+) -> list[dict]:
+ """Get running instances for a chat, optionally filtered by routine."""
+ prefix = f"routine_{chat_id}_"
+ instances = []
+
+ for job in context.job_queue.jobs():
+ if not job.name or not job.name.startswith(prefix):
+ continue
+
+ parts = job.name.split("_")
+ if len(parts) < 4:
+ continue
+
+ rname = "_".join(parts[2:-1])
+ inst_id = parts[-1]
+
+ if routine_name and rname != routine_name:
+ continue
+
+ info = _job_info.get(job.name, {})
+ instances.append({
+ "job_name": job.name,
+ "routine_name": rname,
+ "instance_id": inst_id,
+ "config": info.get("config", {}),
+ "start_time": info.get("start_time", time.time()),
+ })
+
+ return instances
+
+
+def _stop_instance(context: ContextTypes.DEFAULT_TYPE, chat_id: int, job_name: str) -> bool:
+ """Stop a running instance. Returns True if stopped."""
+ jobs = context.job_queue.get_jobs_by_name(job_name)
+ if not jobs:
+ return False
+
+ info = _job_info.pop(job_name, {})
+ start_time = info.get("start_time", time.time())
+ routine_name = info.get("routine_name", "unknown")
+ instance_id = job_name.split("_")[-1]
+
+ jobs[0].schedule_removal()
+
+ # Store result
+ _store_result(chat_id, routine_name, "(stopped)", time.time() - start_time, instance_id)
+
+ # Clean up state
+ try:
+ user_data = context.application.user_data.get(chat_id, {})
+ user_data.pop(f"{routine_name}_state_{chat_id}_{instance_id}", None)
+ except Exception:
+ pass
+
+ return True
+
+
+def _stop_all(
+ context: ContextTypes.DEFAULT_TYPE,
+ chat_id: int,
+ routine_name: str | None = None,
+) -> int:
+ """Stop all instances, optionally filtered by routine. Returns count."""
+ instances = _get_instances(context, chat_id, routine_name)
+ return sum(1 for i in instances if _stop_instance(context, chat_id, i["job_name"]))
+
+
+# =============================================================================
+# Result Storage
+# =============================================================================
+
+
+def _store_result(
+ chat_id: int,
+ routine_name: str,
+ result: str,
+ duration: float,
+ instance_id: str | None = None,
+) -> None:
+ """Store execution result."""
+ if chat_id not in _last_results:
+ _last_results[chat_id] = {}
+
+ # Always store under routine_name for easy retrieval
+ _last_results[chat_id][routine_name] = {
+ "result": result,
+ "duration": duration,
+ "end_time": time.time(),
+ "instance_id": instance_id,
+ }
+
+
+def _get_result(chat_id: int, routine_name: str) -> dict | None:
+ """Get last result for a routine."""
+ return _last_results.get(chat_id, {}).get(routine_name)
+
+
+# =============================================================================
+# Job Callbacks
+# =============================================================================
+
+
+async def _interval_callback(context: CallbackContext) -> None:
+ """Execute one iteration of an interval routine."""
+ data = context.job.data or {}
+ routine_name = data["routine_name"]
+ chat_id = data["chat_id"]
+ config_dict = data["config_dict"]
+ instance_id = data["instance_id"]
+
+ routine = get_routine(routine_name)
+ if not routine:
+ return
+
+ # Prepare context for routine
+ user_data = context.application.user_data.get(chat_id, {})
+ context._chat_id = chat_id
+ context._instance_id = instance_id
+ context._user_data = user_data
+
+ job_name = context.job.name
+ info = _job_info.get(job_name, {})
+ start_time = info.get("start_time", time.time())
+
+ try:
+ config = routine.config_class(**config_dict)
+ result = await routine.run_fn(config, context)
+ result_text = str(result)[:500] if result else "Running..."
+ logger.debug(f"{routine_name}[{instance_id}]: {result_text[:50]}")
+ except Exception as e:
+ result_text = f"Error: {e}"
+ logger.error(f"{routine_name}[{instance_id}] error: {e}")
+
+ # Store result for display in detail view
+ _store_result(chat_id, routine_name, result_text, time.time() - start_time)
+
+
+async def _oneshot_callback(context: CallbackContext) -> None:
+ """Execute a one-shot routine and update UI or send message."""
+ data = context.job.data or {}
+ routine_name = data["routine_name"]
+ chat_id = data["chat_id"]
+ config_dict = data["config_dict"]
+ instance_id = data["instance_id"]
+ msg_id = data.get("msg_id")
+ background = data.get("background", False)
+
+ job_name = context.job.name
+ info = _job_info.get(job_name, {})
+ start_time = info.get("start_time", time.time())
+
+ routine = get_routine(routine_name)
+ if not routine:
+ return
+
+ # Prepare context
+ user_data = context.application.user_data.get(chat_id, {})
+ context._chat_id = chat_id
+ context._instance_id = instance_id
+ context._user_data = user_data
+
+ try:
+ config = routine.config_class(**config_dict)
+ result = await routine.run_fn(config, context)
+ result_text = str(result)[:500] if result else "Completed"
+ status = "completed"
+ except Exception as e:
+ result_text = f"Error: {e}"
+ status = "error"
+ logger.error(f"{routine_name}[{instance_id}] failed: {e}")
+
+ duration = time.time() - start_time
+ _job_info.pop(job_name, None)
+ _store_result(chat_id, routine_name, result_text, duration, instance_id)
+
+ if background:
+ # Send result as new message
+ icon = "✅" if status == "completed" else "❌"
+ text = (
+ f"{icon} *{escape_markdown_v2(_display_name(routine_name))}*\n"
+ f"Duration: {escape_markdown_v2(_format_duration(duration))}\n\n"
+ f"```\n{result_text[:400]}\n```"
+ )
+ try:
+ await context.bot.send_message(
+ chat_id=chat_id,
+ text=text,
+ parse_mode="MarkdownV2",
+ )
+ except Exception as e:
+ logger.error(f"Failed to send background result: {e}")
+ else:
+ # Update existing message
+ await _update_after_run(context, routine_name, chat_id, msg_id, config_dict, result_text, status)
+
+
+async def _update_after_run(
+ context: CallbackContext,
+ routine_name: str,
+ chat_id: int,
+ msg_id: int | None,
+ config_dict: dict,
+ result_text: str,
+ status: str,
+) -> None:
+ """Update the routine detail message after execution."""
+ if not msg_id:
+ return
+
+ routine = get_routine(routine_name)
+ if not routine:
+ return
+
+ fields = routine.get_fields()
+ config_lines = [f"{k}={config_dict.get(k, v['default'])}" for k, v in fields.items()]
+
+ icon = "✅" if status == "completed" else "❌"
+ result_info = _get_result(chat_id, routine_name)
+ duration_str = _format_duration(result_info["duration"]) if result_info else ""
+
+ text = (
+ f"ā” *{escape_markdown_v2(_display_name(routine_name).upper())}*\n"
+ f"āāāāāāāāāāāāāāāāāāāāā\n"
+ f"_{escape_markdown_v2(routine.description)}_\n\n"
+ f"Status: āŖ Ready\n\n"
+ f"āā Config āāāāāāāāāāāāāāāāā\n"
+ f"```\n{chr(10).join(config_lines)}\n```\n"
+ f"āā _āļø send key\\=value to edit_\n\n"
+ f"āā {icon} Result ā {escape_markdown_v2(duration_str)} āāāā\n"
+ f"```\n{result_text[:300]}\n```\n"
+ f"āāāāāāāāāāāāāāāāāāāāāāāāāā"
+ )
+
+ keyboard = [
+ [
+ InlineKeyboardButton("ā¶ļø Run", callback_data=f"routines:run:{routine_name}"),
+ InlineKeyboardButton("š Background", callback_data=f"routines:bg:{routine_name}"),
+ ],
+ [
+ InlineKeyboardButton("ā Help", callback_data=f"routines:help:{routine_name}"),
+ InlineKeyboardButton("Ā« Back", callback_data="routines:menu"),
+ ],
+ ]
+
+ try:
+ await context.bot.edit_message_text(
+ chat_id=chat_id,
+ message_id=msg_id,
+ text=text,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard),
+ )
+ except Exception as e:
+ if "not modified" not in str(e).lower():
+ logger.debug(f"Could not update message: {e}")
+
+
+# =============================================================================
+# UI Display Functions
+# =============================================================================
+
+
+async def _show_menu(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Show main routines menu."""
+ chat_id = update.effective_chat.id
+ routines = discover_routines(force_reload=True)
+ all_instances = _get_instances(context, chat_id)
+
+ if not routines:
+ text = (
+ "ā” *ROUTINES*\n"
+ "āāāāāāāāāāāāāāāāāāāāā\n\n"
+ "No routines found\\.\n\n"
+ "Add Python files to `routines/` folder\\."
+ )
+ keyboard = [[InlineKeyboardButton("š Reload", callback_data="routines:reload")]]
+ else:
+ keyboard = []
+
+ if all_instances:
+ keyboard.append([
+ InlineKeyboardButton(f"š Running ({len(all_instances)})", callback_data="routines:tasks")
+ ])
+
+ for name in sorted(routines.keys()):
+ routine = routines[name]
+ count = len(_get_instances(context, chat_id, name))
+
+ if count > 0:
+ label = f"š¢ {_display_name(name)} ({count})"
+ else:
+ icon = "š" if routine.is_interval else "ā”"
+ label = f"{icon} {_display_name(name)}"
+
+ keyboard.append([InlineKeyboardButton(label, callback_data=f"routines:select:{name}")])
+
+ keyboard.append([InlineKeyboardButton("š Reload", callback_data="routines:reload")])
+
+ running = len(all_instances)
+ status = f"š¢ {running} running" if running else "All idle"
+
+ text = (
+ "ā” *ROUTINES*\n"
+ "āāāāāāāāāāāāāāāāāāāāā\n\n"
+ f"Status: {escape_markdown_v2(status)}\n\n"
+ "Select a routine to configure and run\\."
+ )
+
+ await _edit_or_send(update, text, InlineKeyboardMarkup(keyboard))
+
+
+async def _show_tasks(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+ """Show all running tasks."""
+ chat_id = update.effective_chat.id
+ instances = _get_instances(context, chat_id)
+
+ if not instances:
+ text = (
+ "ā” *RUNNING TASKS*\n"
+ "āāāāāāāāāāāāāāāāāāāāā\n\n"
+ "No tasks running\\."
+ )
+ keyboard = [[InlineKeyboardButton("Ā« Back", callback_data="routines:menu")]]
+ else:
+ lines = ["ā” *RUNNING TASKS*", "āāāāāāāāāāāāāāāāāāāāā\n"]
+ keyboard = []
+
+ for inst in instances:
+ name = inst["routine_name"]
+ inst_id = inst["instance_id"]
+ duration = _format_duration(time.time() - inst["start_time"])
+ config = inst["config"]
+
+ lines.append(f"š¢ *{escape_markdown_v2(_display_name(name))}* `{inst_id}`")
+ lines.append(f" {escape_markdown_v2(duration)}")
+
+ if config:
+ preview = ", ".join(f"{k}\\={v}" for k, v in list(config.items())[:2])
+ lines.append(f" `{preview}`")
+ lines.append("")
+
+ keyboard.append([
+ InlineKeyboardButton(f"ā¹ {_display_name(name)[:10]}[{inst_id}]",
+ callback_data=f"routines:stop:{inst['job_name']}")
+ ])
+
+ keyboard.append([InlineKeyboardButton("ā¹ Stop All", callback_data="routines:stopall")])
+ keyboard.append([InlineKeyboardButton("Ā« Back", callback_data="routines:menu")])
+ text = "\n".join(lines)
+
+ await _edit_or_send(update, text, InlineKeyboardMarkup(keyboard))
+
+
+async def _show_detail(update: Update, context: ContextTypes.DEFAULT_TYPE, routine_name: str) -> None:
+ """Show routine configuration and controls."""
+ chat_id = update.effective_chat.id
+ routine = get_routine(routine_name)
+
+ if not routine:
+ await update.callback_query.answer("Routine not found")
+ return
+
+ # Get or initialize config
+ config_key = f"routine_config_{routine_name}"
+ if config_key not in context.user_data:
+ context.user_data[config_key] = routine.get_default_config().model_dump()
+
+ config = context.user_data[config_key]
+ fields = routine.get_fields()
+ instances = _get_instances(context, chat_id, routine_name)
+
+ # Build config display
+ config_lines = [f"{k}={config.get(k, v['default'])}" for k, v in fields.items()]
+
+ # Status
+ if instances:
+ status = f"š¢ {len(instances)} running"
+ else:
+ status = "āŖ Ready"
+
+ # Instances section
+ inst_section = ""
+ if instances:
+ inst_lines = ["\nāā Running āāāāāāāāāāāāāāāāā"]
+ for inst in instances[:5]:
+ dur = _format_duration(time.time() - inst["start_time"])
+ cfg = ", ".join(f"{k}={v}" for k, v in list(inst["config"].items())[:2])
+ inst_lines.append(f"ā `{inst['instance_id']}` {escape_markdown_v2(cfg)} \\({escape_markdown_v2(dur)}\\)")
+ if len(instances) > 5:
+ inst_lines.append(f"ā _\\+{len(instances) - 5} more_")
+ inst_lines.append("āāāāāāāāāāāāāāāāāāāāāāāāāāāā")
+ inst_section = "\n".join(inst_lines)
+
+ # Result section
+ result_section = ""
+ last = _get_result(chat_id, routine_name)
+ if last:
+ icon = "❌" if last["result"].startswith("Error") else "✅"
+ dur = _format_duration(last["duration"])
+ result_section = (
+ f"\n\nāā {icon} Last ā {escape_markdown_v2(dur)} āāāā\n"
+ f"```\n{last['result'][:250]}\n```\n"
+ f"āāāāāāāāāāāāāāāāāāāāāāāāāāāā"
+ )
+
+ # Type indicator
+ type_str = "š Interval" if routine.is_interval else "ā” One\\-shot"
+
+ text = (
+ f"ā” *{escape_markdown_v2(_display_name(routine_name).upper())}*\n"
+ f"āāāāāāāāāāāāāāāāāāāāā\n"
+ f"_{escape_markdown_v2(routine.description)}_\n"
+ f"{type_str}\n\n"
+ f"Status: {escape_markdown_v2(status)}\n\n"
+ f"āā Config āāāāāāāāāāāāāāāāā\n"
+ f"```\n{chr(10).join(config_lines)}\n```\n"
+ f"āā _āļø send key\\=value to edit_"
+ f"{inst_section}"
+ f"{result_section}"
+ )
+
+ # Build keyboard
+ if routine.is_interval:
+ keyboard = [
+ [InlineKeyboardButton("ā¶ļø Start", callback_data=f"routines:start:{routine_name}")],
+ ]
+ else:
+ keyboard = [
+ [
+ InlineKeyboardButton("ā¶ļø Run", callback_data=f"routines:run:{routine_name}"),
+ InlineKeyboardButton("š Background", callback_data=f"routines:bg:{routine_name}"),
+ ],
+ ]
+
+ if instances:
+ keyboard.append([InlineKeyboardButton(f"ā¹ Stop All ({len(instances)})",
+ callback_data=f"routines:stopall:{routine_name}")])
+
+ keyboard.append([
+ InlineKeyboardButton("ā Help", callback_data=f"routines:help:{routine_name}"),
+ InlineKeyboardButton("Ā« Back", callback_data="routines:menu"),
+ ])
+
+ # Store state for config editing
+ context.user_data["routines_state"] = "editing"
+ context.user_data["routines_editing"] = {
+ "routine": routine_name,
+ "fields": fields,
+ "config_key": config_key,
+ }
+
+ msg = update.callback_query.message if update.callback_query else None
+ if msg:
+ context.user_data["routines_msg_id"] = msg.message_id
+ context.user_data["routines_chat_id"] = msg.chat_id
+
+ await _edit_or_send(update, text, InlineKeyboardMarkup(keyboard))
+
+
+async def _show_help(update: Update, context: ContextTypes.DEFAULT_TYPE, routine_name: str) -> None:
+ """Show field descriptions."""
+ routine = get_routine(routine_name)
+ if not routine:
+ return
+
+ lines = [
+ f"ā *{escape_markdown_v2(_display_name(routine_name).upper())}*",
+ "āāāāāāāāāāāāāāāāāāāāā\n",
+ ]
+
+ for name, info in routine.get_fields().items():
+ lines.append(f"⢠`{escape_markdown_v2(name)}` _{escape_markdown_v2(info['type'])}_")
+ lines.append(f" {escape_markdown_v2(info['description'])}")
+ lines.append(f" Default: `{escape_markdown_v2(str(info['default']))}`\n")
+
+ keyboard = [[InlineKeyboardButton("Ā« Back", callback_data=f"routines:select:{routine_name}")]]
+ await _edit_or_send(update, "\n".join(lines), InlineKeyboardMarkup(keyboard))
+
+
+# =============================================================================
+# Actions
+# =============================================================================
+
+
+async def _run_oneshot(
+    update: Update,
+    context: ContextTypes.DEFAULT_TYPE,
+    routine_name: str,
+    background: bool = False,
+) -> None:
+    """Run a one-shot routine via the job queue.
+
+    Validates the stored per-chat config, then schedules `_oneshot_callback`
+    to fire almost immediately instead of awaiting the routine inline, so
+    the Telegram callback query is answered without blocking.
+
+    Args:
+        routine_name: Name of the discovered routine to execute.
+        background: Forwarded to `_oneshot_callback`; presumably controls
+            whether the result is delivered as a separate message rather
+            than refreshing the detail view — confirm in the callback.
+    """
+    chat_id = update.effective_chat.id
+    routine = get_routine(routine_name)
+
+    if not routine:
+        await update.callback_query.answer("Routine not found")
+        return
+
+    # Per-chat config persisted in user_data; empty dict falls back to the
+    # model's declared defaults.
+    config_key = f"routine_config_{routine_name}"
+    config_dict = context.user_data.get(config_key, {})
+
+    # Validate only — the instance is discarded; the job callback rebuilds it.
+    try:
+        routine.config_class(**config_dict)
+    except Exception as e:
+        await update.callback_query.answer(f"Config error: {e}")
+        return
+
+    instance_id = _generate_instance_id(routine_name, config_dict)
+    job = _job_name(chat_id, routine_name, instance_id)
+    msg_id = context.user_data.get("routines_msg_id")
+
+    # Bookkeeping for the tasks view (start time / config shown while running).
+    _job_info[job] = {
+        "start_time": time.time(),
+        "config": config_dict,
+        "routine_name": routine_name,
+    }
+
+    context.job_queue.run_once(
+        _oneshot_callback,
+        when=0.1,
+        data={
+            "routine_name": routine_name,
+            "chat_id": chat_id,
+            "config_dict": config_dict,
+            "instance_id": instance_id,
+            "msg_id": msg_id,
+            "background": background,
+        },
+        name=job,
+        chat_id=chat_id,
+    )
+
+    if background:
+        await update.callback_query.answer("š Running in background...")
+    else:
+        await update.callback_query.answer("ā¶ļø Running...")
+
+    await _show_detail(update, context, routine_name)
+
+
+async def _start_interval(update: Update, context: ContextTypes.DEFAULT_TYPE, routine_name: str) -> None:
+    """Start an interval routine as a repeating job.
+
+    Validates the stored per-chat config, refuses to start a second job
+    with the identical config (the job name is derived from the config
+    hash), then registers `_interval_callback` on the job queue at the
+    config's `interval_sec` cadence.
+    """
+    chat_id = update.effective_chat.id
+    routine = get_routine(routine_name)
+
+    if not routine:
+        await update.callback_query.answer("Routine not found")
+        return
+
+    config_key = f"routine_config_{routine_name}"
+    config_dict = context.user_data.get(config_key, {})
+
+    # Unlike _run_oneshot we keep the validated instance: interval_sec is
+    # read off it below.
+    try:
+        config = routine.config_class(**config_dict)
+    except Exception as e:
+        await update.callback_query.answer(f"Config error: {e}")
+        return
+
+    instance_id = _generate_instance_id(routine_name, config_dict)
+    job = _job_name(chat_id, routine_name, instance_id)
+
+    # Check duplicate — same routine + same config hash ⇒ same job name.
+    if context.job_queue.get_jobs_by_name(job):
+        await update.callback_query.answer("ā ļø Already running with this config")
+        await _show_detail(update, context, routine_name)
+        return
+
+    interval = getattr(config, "interval_sec", 5)
+    msg_id = context.user_data.get("routines_msg_id")
+
+    # Bookkeeping for the tasks view (start time / config shown while running).
+    _job_info[job] = {
+        "start_time": time.time(),
+        "config": config_dict,
+        "routine_name": routine_name,
+    }
+
+    context.job_queue.run_repeating(
+        _interval_callback,
+        interval=interval,
+        first=0.1,
+        data={
+            "routine_name": routine_name,
+            "chat_id": chat_id,
+            "config_dict": config_dict,
+            "instance_id": instance_id,
+            "msg_id": msg_id,
+        },
+        name=job,
+        chat_id=chat_id,
+    )
+
+    await update.callback_query.answer(f"š Started (every {interval}s)")
+    await _show_detail(update, context, routine_name)
+
+
+# =============================================================================
+# Config Input Processing
+# =============================================================================
+
+
+async def _process_config(update: Update, context: ContextTypes.DEFAULT_TYPE, text: str) -> None:
+ """Process key=value config input."""
+ editing = context.user_data.get("routines_editing", {})
+ routine_name = editing.get("routine")
+ fields = editing.get("fields", {})
+ config_key = editing.get("config_key")
+
+ if not routine_name or not config_key:
+ return
+
+ # Delete user message
+ try:
+ await update.message.delete()
+ except Exception:
+ pass
+
+ updates = {}
+ errors = []
+
+ for line in text.split("\n"):
+ line = line.strip()
+ if not line or "=" not in line:
+ continue
+
+ key, _, value = line.partition("=")
+ key, value = key.strip(), value.strip()
+
+ if key not in fields:
+ errors.append(f"Unknown: {key}")
+ continue
+
+ field_type = fields[key]["type"]
+ try:
+ if field_type == "int":
+ value = int(value)
+ elif field_type == "float":
+ value = float(value)
+ elif field_type == "bool":
+ value = value.lower() in ("true", "yes", "1", "on")
+ except ValueError:
+ errors.append(f"Invalid: {key}")
+ continue
+
+ updates[key] = value
+
+ if errors:
+ msg = await update.message.reply_text(f"ā ļø {', '.join(errors)}")
+ asyncio.create_task(_delete_after(msg, 3))
+
+ if not updates:
+ msg = await update.message.reply_text("ā Use: `key=value`", parse_mode="Markdown")
+ asyncio.create_task(_delete_after(msg, 3))
+ return
+
+ if config_key not in context.user_data:
+ routine = get_routine(routine_name)
+ context.user_data[config_key] = routine.get_default_config().model_dump()
+
+ context.user_data[config_key].update(updates)
+
+ msg = await update.message.reply_text(
+ f"ā
{', '.join(f'`{k}={v}`' for k, v in updates.items())}",
+ parse_mode="Markdown",
+ )
+ asyncio.create_task(_delete_after(msg, 2))
+
+ await _refresh_detail(context, routine_name)
+
+
+async def _refresh_detail(context: ContextTypes.DEFAULT_TYPE, routine_name: str) -> None:
+ """Refresh routine detail after config update."""
+ msg_id = context.user_data.get("routines_msg_id")
+ chat_id = context.user_data.get("routines_chat_id")
+
+ if not msg_id or not chat_id:
+ return
+
+ routine = get_routine(routine_name)
+ if not routine:
+ return
+
+ config_key = f"routine_config_{routine_name}"
+ config = context.user_data.get(config_key, {})
+ fields = routine.get_fields()
+ instances = _get_instances(context, chat_id, routine_name)
+
+ config_lines = [f"{k}={config.get(k, v['default'])}" for k, v in fields.items()]
+
+ status = f"š¢ {len(instances)} running" if instances else "āŖ Ready"
+ type_str = "š Interval" if routine.is_interval else "ā” One\\-shot"
+
+ # Result section
+ result_section = ""
+ last = _get_result(chat_id, routine_name)
+ if last:
+ icon = "ā" if last["result"].startswith("Error") else "ā
"
+ dur = _format_duration(last["duration"])
+ result_section = (
+ f"\n\nāā {icon} Last ā {escape_markdown_v2(dur)} āāāā\n"
+ f"```\n{last['result'][:250]}\n```\n"
+ f"āāāāāāāāāāāāāāāāāāāāāāāāāāāā"
+ )
+
+ text = (
+ f"ā” *{escape_markdown_v2(_display_name(routine_name).upper())}*\n"
+ f"āāāāāāāāāāāāāāāāāāāāā\n"
+ f"_{escape_markdown_v2(routine.description)}_\n"
+ f"{type_str}\n\n"
+ f"Status: {escape_markdown_v2(status)}\n\n"
+ f"āā Config āāāāāāāāāāāāāāāāā\n"
+ f"```\n{chr(10).join(config_lines)}\n```\n"
+ f"āā _āļø send key\\=value to edit_"
+ f"{result_section}"
+ )
+
+ if routine.is_interval:
+ keyboard = [
+ [InlineKeyboardButton("ā¶ļø Start", callback_data=f"routines:start:{routine_name}")],
+ ]
+ else:
+ keyboard = [
+ [
+ InlineKeyboardButton("ā¶ļø Run", callback_data=f"routines:run:{routine_name}"),
+ InlineKeyboardButton("š Background", callback_data=f"routines:bg:{routine_name}"),
+ ],
+ ]
+
+ if instances:
+ keyboard.append([InlineKeyboardButton(f"ā¹ Stop All ({len(instances)})",
+ callback_data=f"routines:stopall:{routine_name}")])
+
+ keyboard.append([
+ InlineKeyboardButton("ā Help", callback_data=f"routines:help:{routine_name}"),
+ InlineKeyboardButton("Ā« Back", callback_data="routines:menu"),
+ ])
+
+ try:
+ await context.bot.edit_message_text(
+ chat_id=chat_id,
+ message_id=msg_id,
+ text=text,
+ parse_mode="MarkdownV2",
+ reply_markup=InlineKeyboardMarkup(keyboard),
+ )
+ except Exception as e:
+ if "not modified" not in str(e).lower():
+ logger.debug(f"Could not refresh: {e}")
+
+
+# =============================================================================
+# Helpers
+# =============================================================================
+
+
+async def _edit_or_send(update: Update, text: str, reply_markup: InlineKeyboardMarkup) -> None:
+ """Edit message if callback, otherwise send new."""
+ if update.callback_query:
+ try:
+ await update.callback_query.message.edit_text(
+ text, parse_mode="MarkdownV2", reply_markup=reply_markup
+ )
+ except Exception as e:
+ if "not modified" not in str(e).lower():
+ logger.warning(f"Edit failed: {e}")
+ else:
+ msg = update.message or update.callback_query.message
+ await msg.reply_text(text, parse_mode="MarkdownV2", reply_markup=reply_markup)
+
+
+async def _delete_after(message, seconds: float) -> None:
+ """Delete message after delay."""
+ await asyncio.sleep(seconds)
+ try:
+ await message.delete()
+ except Exception:
+ pass
+
+
+# =============================================================================
+# Handlers
+# =============================================================================
+
+
+@restricted
+async def routines_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+    """Handle the /routines command.
+
+    Clears any pending input state from other handlers first, so stray
+    text input is not misinterpreted, then shows the routines menu.
+    Access is limited to authorized users via @restricted.
+    """
+    clear_all_input_states(context)
+    await _show_menu(update, context)
+
+
+@restricted
+async def routines_callback_handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+    """Handle routine callback queries.
+
+    Callback data format: "routines:<action>[:<arg>...]". Unknown or
+    malformed data is answered silently so the Telegram spinner clears.
+    """
+    query = update.callback_query
+    chat_id = update.effective_chat.id
+    parts = query.data.split(":")
+
+    if len(parts) < 2:
+        await query.answer()
+        return
+
+    action = parts[1]
+
+    if action == "menu":
+        await query.answer()
+        # Leaving the detail view cancels any pending config editing.
+        context.user_data.pop("routines_state", None)
+        context.user_data.pop("routines_editing", None)
+        await _show_menu(update, context)
+
+    elif action == "reload":
+        await query.answer("Reloading...")
+        discover_routines(force_reload=True)
+        await _show_menu(update, context)
+
+    elif action == "tasks":
+        await query.answer()
+        await _show_tasks(update, context)
+
+    elif action == "select" and len(parts) >= 3:
+        await query.answer()
+        await _show_detail(update, context, parts[2])
+
+    elif action == "run" and len(parts) >= 3:
+        await _run_oneshot(update, context, parts[2], background=False)
+
+    elif action == "bg" and len(parts) >= 3:
+        await _run_oneshot(update, context, parts[2], background=True)
+
+    elif action == "start" and len(parts) >= 3:
+        await _start_interval(update, context, parts[2])
+
+    elif action == "stop" and len(parts) >= 3:
+        # Job names may themselves contain ":", so re-join the remainder.
+        job_name = ":".join(parts[2:])
+        if _stop_instance(context, chat_id, job_name):
+            await query.answer("ā¹ Stopped")
+        else:
+            await query.answer("Not found")
+        await _show_tasks(update, context)
+
+    elif action == "stopall" and len(parts) >= 3:
+        # Per-routine stop (invoked from the routine detail view).
+        count = _stop_all(context, chat_id, parts[2])
+        await query.answer(f"ā¹ Stopped {count}")
+        await _show_detail(update, context, parts[2])
+
+    elif action == "stopall":
+        # Global stop with no routine argument (from the tasks view).
+        count = _stop_all(context, chat_id)
+        await query.answer(f"ā¹ Stopped {count}")
+        await _show_tasks(update, context)
+
+    elif action == "help" and len(parts) >= 3:
+        await query.answer()
+        await _show_help(update, context, parts[2])
+
+    else:
+        await query.answer()
+
+
+async def routines_message_handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> bool:
+ """Handle text input for config editing."""
+ if context.user_data.get("routines_state") == "editing":
+ await _process_config(update, context, update.message.text.strip())
+ return True
+ return False
+
+
+__all__ = ["routines_command", "routines_callback_handler", "routines_message_handler"]
diff --git a/main.py b/main.py
index 605f69b..079aac2 100644
--- a/main.py
+++ b/main.py
@@ -19,6 +19,7 @@
from handlers.cex import trade_command, cex_callback_handler
from handlers.dex import swap_command, lp_command, dex_callback_handler
from handlers.config import config_command, get_config_callback_handler, get_modify_value_handler
+from handlers.routines import routines_command, routines_callback_handler
from handlers import clear_all_input_states
from utils.auth import restricted
from utils.config import TELEGRAM_TOKEN
@@ -392,6 +393,8 @@ def reload_handlers():
'handlers.config.api_keys',
'handlers.config.gateway',
'handlers.config.user_preferences',
+ 'handlers.routines',
+ 'routines.base',
'utils.auth',
'utils.telegram_formatters',
]
@@ -410,6 +413,7 @@ def register_handlers(application: Application) -> None:
from handlers.cex import trade_command, cex_callback_handler
from handlers.dex import swap_command, lp_command, dex_callback_handler
from handlers.config import config_command, get_config_callback_handler, get_modify_value_handler
+ from handlers.routines import routines_command, routines_callback_handler
# Clear existing handlers
application.handlers.clear()
@@ -422,6 +426,7 @@ def register_handlers(application: Application) -> None:
application.add_handler(CommandHandler("trade", trade_command))
application.add_handler(CommandHandler("lp", lp_command))
application.add_handler(CommandHandler("config", config_command))
+ application.add_handler(CommandHandler("routines", routines_command))
# Add callback query handler for start menu navigation
application.add_handler(CallbackQueryHandler(start_callback_handler, pattern="^(start:|help:)"))
@@ -430,6 +435,7 @@ def register_handlers(application: Application) -> None:
application.add_handler(CallbackQueryHandler(cex_callback_handler, pattern="^cex:"))
application.add_handler(CallbackQueryHandler(dex_callback_handler, pattern="^dex:"))
application.add_handler(CallbackQueryHandler(bots_callback_handler, pattern="^bots:"))
+ application.add_handler(CallbackQueryHandler(routines_callback_handler, pattern="^routines:"))
# Add callback query handler for portfolio settings
application.add_handler(get_portfolio_callback_handler())
@@ -456,6 +462,7 @@ async def post_init(application: Application) -> None:
BotCommand("trade", "Order book trading (CEX/CLOB) with limit orders"),
BotCommand("lp", "Liquidity pool management and explorer"),
BotCommand("config", "Configure API servers and credentials"),
+ BotCommand("routines", "Run configurable Python scripts"),
]
await application.bot.set_my_commands(commands)
@@ -471,10 +478,11 @@ async def watch_and_reload(application: Application) -> None:
logger.warning("watchfiles not installed. Auto-reload disabled. Install with: pip install watchfiles")
return
- watch_path = Path(__file__).parent / "handlers"
- logger.info(f"š Watching for changes in: {watch_path}")
+ handlers_path = Path(__file__).parent / "handlers"
+ routines_path = Path(__file__).parent / "routines"
+ logger.info(f"š Watching for changes in: {handlers_path}, {routines_path}")
- async for changes in awatch(watch_path):
+ async for changes in awatch(handlers_path, routines_path):
logger.info(f"š Detected changes: {changes}")
try:
reload_handlers()
@@ -487,11 +495,11 @@ def get_persistence() -> PicklePersistence:
"""
Build a persistence object that works both locally and in Docker.
- Uses an env var override if provided.
- - Defaults to /data/condor_bot_data.pickle.
+ - Defaults to /condor_bot_data.pickle.
- Ensures the parent directory exists, but does NOT create the file.
"""
base_dir = Path(__file__).parent
- default_path = base_dir / "data" / "condor_bot_data.pickle"
+ default_path = base_dir / "condor_bot_data.pickle"
persistence_path = Path(os.getenv("CONDOR_PERSISTENCE_FILE", default_path))
diff --git a/routines/__init__.py b/routines/__init__.py
new file mode 100644
index 0000000..d8b41ca
--- /dev/null
+++ b/routines/__init__.py
@@ -0,0 +1,7 @@
+"""
+Routines - Auto-discoverable Python scripts with Pydantic configuration.
+
+Each routine is a Python file with:
+- Config: Pydantic BaseModel (docstring = menu description)
+- run(config, context) -> str: Async function
+"""
diff --git a/routines/arb_check.py b/routines/arb_check.py
new file mode 100644
index 0000000..6405ea1
--- /dev/null
+++ b/routines/arb_check.py
@@ -0,0 +1,144 @@
+"""Check for CEX/DEX arbitrage opportunities."""
+
+from decimal import Decimal
+from pydantic import BaseModel, Field
+from telegram.ext import ContextTypes
+
+from servers import get_client
+
+
+class Config(BaseModel):
+    """Check CEX vs DEX price arbitrage opportunities."""
+
+    # NOTE: the docstring's first line doubles as the routine's menu
+    # description (see RoutineInfo in routines/base.py). No interval_sec
+    # field ⇒ this is a one-shot routine.
+    trading_pair: str = Field(default="SOL-USDC", description="Trading pair (e.g. SOL-USDC)")
+    amount: float = Field(default=1.0, description="Amount to quote")
+    cex_connector: str = Field(default="binance", description="CEX connector")
+    dex_connector: str = Field(default="jupiter", description="DEX connector")
+    dex_network: str = Field(default="solana-mainnet-beta", description="DEX network")
+
+
+async def run(config: Config, context: ContextTypes.DEFAULT_TYPE) -> str:
+ """Check arbitrage between CEX and DEX."""
+ chat_id = context._chat_id if hasattr(context, '_chat_id') else None
+ client = await get_client(chat_id)
+
+ if not client:
+ return "No server available. Configure servers in /config."
+
+ results = []
+ cex_buy, cex_sell = None, None
+ dex_buy, dex_sell = None, None
+
+ # --- CEX Quotes ---
+ try:
+ # Get CEX price for volume (buy and sell)
+ async def get_cex_quote(is_buy: bool):
+ try:
+ result = await client.market_data.get_price_for_volume(
+ connector_name=config.cex_connector,
+ trading_pair=config.trading_pair,
+ volume=config.amount,
+ is_buy=is_buy
+ )
+ if isinstance(result, dict):
+ return (
+ result.get("result_price") or
+ result.get("price") or
+ result.get("average_price")
+ )
+ return None
+ except Exception as e:
+ return None
+
+ import asyncio
+ cex_buy, cex_sell = await asyncio.gather(
+ get_cex_quote(True),
+ get_cex_quote(False)
+ )
+
+ if cex_buy:
+ results.append(f"CEX BUY: {float(cex_buy):.6f}")
+ if cex_sell:
+ results.append(f"CEX SELL: {float(cex_sell):.6f}")
+
+ if not cex_buy and not cex_sell:
+ results.append(f"CEX: No quotes from {config.cex_connector}")
+
+ except Exception as e:
+ results.append(f"CEX Error: {str(e)}")
+
+ # --- DEX Quotes ---
+ try:
+ if hasattr(client, 'gateway_swap'):
+ async def get_dex_quote(side: str):
+ try:
+ result = await client.gateway_swap.get_swap_quote(
+ connector=config.dex_connector,
+ network=config.dex_network,
+ trading_pair=config.trading_pair,
+ side=side,
+ amount=Decimal(str(config.amount)),
+ slippage_pct=Decimal("1.0")
+ )
+ if isinstance(result, dict):
+ return result.get("price")
+ return None
+ except Exception:
+ return None
+
+ dex_buy, dex_sell = await asyncio.gather(
+ get_dex_quote("BUY"),
+ get_dex_quote("SELL")
+ )
+
+ if dex_buy:
+ results.append(f"DEX BUY: {float(dex_buy):.6f}")
+ if dex_sell:
+ results.append(f"DEX SELL: {float(dex_sell):.6f}")
+
+ if not dex_buy and not dex_sell:
+ results.append(f"DEX: No quotes from {config.dex_connector}")
+ else:
+ results.append("DEX: Gateway not available")
+
+ except Exception as e:
+ results.append(f"DEX Error: {str(e)}")
+
+ # --- Arbitrage Analysis ---
+ results.append("")
+ results.append("--- Arbitrage ---")
+
+ opportunities = []
+
+ # Strategy 1: Buy CEX, Sell DEX
+ if cex_buy and dex_sell:
+ cex_buy_f = float(cex_buy)
+ dex_sell_f = float(dex_sell)
+ # For a BUY on CEX: price is what we pay per unit
+ # For a SELL on DEX: price is what we receive per unit
+ spread_pct = ((dex_sell_f - cex_buy_f) / cex_buy_f) * 100
+ profit = (dex_sell_f - cex_buy_f) * config.amount
+ if spread_pct > 0:
+ opportunities.append(f"BUY CEX -> SELL DEX: +{spread_pct:.2f}% (${profit:.2f})")
+ else:
+ results.append(f"BUY CEX -> SELL DEX: {spread_pct:.2f}%")
+
+ # Strategy 2: Buy DEX, Sell CEX
+ if dex_buy and cex_sell:
+ dex_buy_f = float(dex_buy)
+ cex_sell_f = float(cex_sell)
+ spread_pct = ((cex_sell_f - dex_buy_f) / dex_buy_f) * 100
+ profit = (cex_sell_f - dex_buy_f) * config.amount
+ if spread_pct > 0:
+ opportunities.append(f"BUY DEX -> SELL CEX: +{spread_pct:.2f}% (${profit:.2f})")
+ else:
+ results.append(f"BUY DEX -> SELL CEX: {spread_pct:.2f}%")
+
+ if opportunities:
+ results.append("")
+ results.append("OPPORTUNITIES FOUND:")
+ results.extend(opportunities)
+ else:
+ results.append("No profitable arbitrage found.")
+
+ return "\n".join(results)
diff --git a/routines/base.py b/routines/base.py
new file mode 100644
index 0000000..99c6d60
--- /dev/null
+++ b/routines/base.py
@@ -0,0 +1,125 @@
+"""
+Base classes and discovery for routines.
+
+Routine Types:
+- Interval: Has `interval_sec` field in Config ā runs repeatedly at interval
+- One-shot: No `interval_sec` field ā runs once and returns result
+"""
+
+import importlib
+import logging
+from pathlib import Path
+from typing import Any, Callable, Awaitable
+
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+
+_routines_cache: dict[str, "RoutineInfo"] | None = None
+
+
+class RoutineInfo:
+ """Metadata container for a discovered routine."""
+
+ def __init__(
+ self,
+ name: str,
+ config_class: type[BaseModel],
+ run_fn: Callable[[BaseModel, Any], Awaitable[str]],
+ ):
+ self.name = name
+ self.config_class = config_class
+ self.run_fn = run_fn
+
+ # Extract description from Config docstring
+ doc = config_class.__doc__ or name
+ self.description = doc.strip().split("\n")[0]
+
+ @property
+ def is_interval(self) -> bool:
+ """Check if this is an interval routine (has interval_sec field)."""
+ return "interval_sec" in self.config_class.model_fields
+
+ @property
+ def default_interval(self) -> int:
+ """Get default interval in seconds (only for interval routines)."""
+ if not self.is_interval:
+ return 0
+ field = self.config_class.model_fields["interval_sec"]
+ return field.default if field.default is not None else 5
+
+ def get_default_config(self) -> BaseModel:
+ """Create config instance with default values."""
+ return self.config_class()
+
+ def get_fields(self) -> dict[str, dict]:
+ """Get field metadata for UI display."""
+ fields = {}
+ for name, field_info in self.config_class.model_fields.items():
+ annotation = field_info.annotation
+ type_name = getattr(annotation, "__name__", str(annotation))
+ fields[name] = {
+ "type": type_name,
+ "default": field_info.default,
+ "description": field_info.description or name,
+ }
+ return fields
+
+
+def discover_routines(force_reload: bool = False) -> dict[str, RoutineInfo]:
+ """
+ Discover all routines in the routines folder.
+
+ Each routine module needs:
+ - Config: Pydantic BaseModel with optional docstring description
+ - run(config, context) -> str: Async function that executes the routine
+
+ Args:
+ force_reload: Force reimport of all modules
+
+ Returns:
+ Dict mapping routine name to RoutineInfo
+ """
+ global _routines_cache
+
+ if _routines_cache is not None and not force_reload:
+ return _routines_cache
+
+ routines_dir = Path(__file__).parent
+ routines = {}
+
+ for file_path in routines_dir.glob("*.py"):
+ if file_path.stem in ("__init__", "base"):
+ continue
+
+ try:
+ module_name = f"routines.{file_path.stem}"
+
+ if force_reload and module_name in importlib.sys.modules:
+ importlib.reload(importlib.sys.modules[module_name])
+ else:
+ importlib.import_module(module_name)
+
+ module = importlib.sys.modules[module_name]
+
+ if not hasattr(module, "Config") or not hasattr(module, "run"):
+ logger.warning(f"Routine {file_path.stem}: missing Config or run")
+ continue
+
+ routines[file_path.stem] = RoutineInfo(
+ name=file_path.stem,
+ config_class=module.Config,
+ run_fn=module.run,
+ )
+ logger.debug(f"Discovered routine: {file_path.stem}")
+
+ except Exception as e:
+ logger.error(f"Failed to load routine {file_path.stem}: {e}")
+
+ _routines_cache = routines
+ return routines
+
+
+def get_routine(name: str) -> RoutineInfo | None:
+ """Get a specific routine by name."""
+ return discover_routines().get(name)
diff --git a/routines/hello_world.py b/routines/hello_world.py
new file mode 100644
index 0000000..2c52e69
--- /dev/null
+++ b/routines/hello_world.py
@@ -0,0 +1,22 @@
+"""Example routine - Hello World."""
+
+from pydantic import BaseModel, Field
+from telegram.ext import ContextTypes
+
+
+class Config(BaseModel):
+    """Simple hello world example routine."""
+
+    # No interval_sec field ⇒ discovered as a one-shot routine
+    # (see RoutineInfo.is_interval in routines/base.py).
+    name: str = Field(default="World", description="Name to greet")
+    repeat: int = Field(default=1, description="Number of times to repeat")
+    uppercase: bool = Field(default=False, description="Use uppercase")
+
+
+async def run(config: Config, context: ContextTypes.DEFAULT_TYPE) -> str:
+ """Execute the routine."""
+ greeting = f"Hello, {config.name}!"
+
+ if config.uppercase:
+ greeting = greeting.upper()
+
+ return "\n".join([greeting] * config.repeat)
diff --git a/routines/price_monitor.py b/routines/price_monitor.py
new file mode 100644
index 0000000..4974bfd
--- /dev/null
+++ b/routines/price_monitor.py
@@ -0,0 +1,127 @@
+"""Monitor price and alert on threshold."""
+
+import logging
+import time
+from pydantic import BaseModel, Field
+from telegram.ext import ContextTypes
+
+from servers import get_client
+from utils.telegram_formatters import escape_markdown_v2
+
+logger = logging.getLogger(__name__)
+
+
+class Config(BaseModel):
+    """Live price monitor with configurable alerts."""
+
+    # The presence of interval_sec marks this as an interval routine
+    # (see RoutineInfo.is_interval in routines/base.py); the handler
+    # re-invokes run() at this cadence.
+    connector: str = Field(default="binance", description="CEX connector name")
+    trading_pair: str = Field(default="BTC-USDT", description="Trading pair to monitor")
+    threshold_pct: float = Field(default=1.0, description="Alert threshold in %")
+    interval_sec: int = Field(default=10, description="Refresh interval in seconds")
+
+
+async def run(config: Config, context: ContextTypes.DEFAULT_TYPE) -> str:
+    """
+    Monitor price - single iteration.
+
+    Runs silently in background. Sends alert messages when threshold is crossed.
+    Returns status string for the routine handler to display.
+
+    State (initial/last/high/low price, counters) is persisted in user_data
+    under a per-chat, per-instance key so repeated invocations of this
+    interval routine share history. Alerts compare against the PREVIOUS
+    tick's price, not the initial price.
+    """
+    chat_id = context._chat_id if hasattr(context, '_chat_id') else None
+    client = await get_client(chat_id)
+
+    if not client:
+        return "No server available"
+
+    # Get user_data and instance_id (the routine handler injects the
+    # private attributes; fall back to PTB's own user_data).
+    user_data = getattr(context, '_user_data', None) or getattr(context, 'user_data', {})
+    instance_id = getattr(context, '_instance_id', 'default')
+
+    # State key for this routine instance
+    state_key = f"price_monitor_state_{chat_id}_{instance_id}"
+
+    # Get or initialize state
+    state = user_data.get(state_key, {})
+
+    # Get current price
+    try:
+        prices = await client.market_data.get_prices(
+            connector_name=config.connector,
+            trading_pairs=config.trading_pair
+        )
+        current_price = prices["prices"].get(config.trading_pair)
+        # NOTE(review): falsy check means a price of exactly 0 is treated
+        # as missing — confirm that is intended.
+        if not current_price:
+            return f"No price for {config.trading_pair}"
+    except Exception as e:
+        return f"Error: {e}"
+
+    # Initialize state on first run
+    if not state:
+        state = {
+            "initial_price": current_price,
+            "last_price": current_price,
+            "high_price": current_price,
+            "low_price": current_price,
+            "alerts_sent": 0,
+            "updates": 0,
+            "start_time": time.time(),
+        }
+        user_data[state_key] = state
+
+        # Send start notification (best-effort; monitoring continues even
+        # if the message fails to send)
+        try:
+            pair_esc = escape_markdown_v2(config.trading_pair)
+            price_esc = escape_markdown_v2(f"${current_price:,.2f}")
+            await context.bot.send_message(
+                chat_id=chat_id,
+                text=f"š¢ *Price Monitor Started*\n{pair_esc}: `{price_esc}`",
+                parse_mode="MarkdownV2"
+            )
+        except Exception:
+            pass
+
+    # Update tracking
+    state["high_price"] = max(state["high_price"], current_price)
+    state["low_price"] = min(state["low_price"], current_price)
+
+    # Calculate changes (tick-over-tick and since the monitor started)
+    change_from_last = ((current_price - state["last_price"]) / state["last_price"]) * 100
+    change_from_start = ((current_price - state["initial_price"]) / state["initial_price"]) * 100
+
+    # Check threshold for alert
+    if abs(change_from_last) >= config.threshold_pct:
+        direction = "š" if change_from_last > 0 else "š"
+        pair_esc = escape_markdown_v2(config.trading_pair)
+        price_esc = escape_markdown_v2(f"${current_price:,.2f}")
+        change_esc = escape_markdown_v2(f"{change_from_last:+.2f}%")
+
+        try:
+            await context.bot.send_message(
+                chat_id=chat_id,
+                text=(
+                    f"{direction} *{pair_esc} Alert*\n"
+                    f"Price: `{price_esc}`\n"
+                    f"Change: `{change_esc}`"
+                ),
+                parse_mode="MarkdownV2"
+            )
+            state["alerts_sent"] += 1
+        except Exception:
+            pass
+
+    # Update state
+    state["last_price"] = current_price
+    state["updates"] += 1
+    user_data[state_key] = state
+
+    # Build status string for handler display
+    elapsed = int(time.time() - state["start_time"])
+    mins, secs = divmod(elapsed, 60)
+
+    trend = "š" if change_from_start > 0.01 else "š" if change_from_start < -0.01 else "ā”ļø"
+
+    return (
+        f"{trend} ${current_price:,.2f} ({change_from_start:+.2f}%)\n"
+        f"High: ${state['high_price']:,.2f} | Low: ${state['low_price']:,.2f}\n"
+        f"Updates: {state['updates']} | Alerts: {state['alerts_sent']} | {mins}m {secs}s"
+    )
diff --git a/servers.yml b/servers.yml
index c0785b2..bacc35c 100644
--- a/servers.yml
+++ b/servers.yml
@@ -1,7 +1,22 @@
servers:
+  remote:
+    host: 212.85.15.60
+    port: 8000
+    username: eldonero
+    password: barabit  # FIXME(security): plaintext credential and public IP committed to VCS — load from env vars / a secret store instead
local:
host: localhost
port: 8000
username: admin
password: admin
+  brigado_server:
+    host: 63.250.52.240
+    port: 8000
+    username: admin
+    password: admin  # FIXME(security): default credentials on a public host committed to VCS — change and move to a secret store
default_server: local
+per_chat_defaults:
+ 481175164: local
+ -1003055388363: brigado_server
+ -4615022999: brigado_server
+ 456181693: remote
diff --git a/utils/telegram_formatters.py b/utils/telegram_formatters.py
index 777d3a9..fce9d45 100644
--- a/utils/telegram_formatters.py
+++ b/utils/telegram_formatters.py
@@ -410,7 +410,12 @@ def format_active_bots(
# Format PnL and volume compactly
pnl_str = f"{pnl:+.2f}"[:8]
- vol_str = f"{volume/1000:.1f}k" if volume >= 1000 else f"{volume:.0f}"
+ if volume >= 1000000:
+ vol_str = f"{volume/1000000:.1f}M"
+ elif volume >= 1000:
+ vol_str = f"{volume/1000:.1f}k"
+ else:
+ vol_str = f"{volume:.0f}"
vol_str = vol_str[:7]
message += f"{ctrl_display:<28} {pnl_str:>8} {vol_str:>7}\n"
@@ -419,7 +424,12 @@ def format_active_bots(
if len(performance) >= 1:
message += f"{'ā'*28} {'ā'*8} {'ā'*7}\n"
pnl_total_str = f"{total_pnl:+.2f}"[:8]
- vol_total = f"{total_volume/1000:.1f}k" if total_volume >= 1000 else f"{total_volume:.0f}"
+ if total_volume >= 1000000:
+ vol_total = f"{total_volume/1000000:.1f}M"
+ elif total_volume >= 1000:
+ vol_total = f"{total_volume/1000:.1f}k"
+ else:
+ vol_total = f"{total_volume:.0f}"
vol_total = vol_total[:7]
message += f"{'TOTAL':<28} {pnl_total_str:>8} {vol_total:>7}\n"
@@ -1040,7 +1050,7 @@ def format_portfolio_overview(
# Show detected movements if any (max 5 most recent)
if detected_movements:
- message += f"_\\({len(detected_movements)} movimiento\\(s\\) detectado\\(s\\) ajustados\\)_\n"
+ message += f"_\\({len(detected_movements)} detected movement\\(s\\) adjusted\\)_\n"
message += "\n"
# ============================================