diff --git a/Readme.md b/Readme.md index a706ca1..1a9207b 100644 --- a/Readme.md +++ b/Readme.md @@ -12,6 +12,7 @@ This project tracks `estimatesmartfee` from a Bitcoin Core node and compares tho - **Historical Accuracy**: Visualizes the accuracy of estimates (within range, overpaid, or underpaid) compared to real block data. - **Mempool Diagram**: Real-time visualization of the mempool fee/weight accumulation curve. - **Block Statistics**: Direct insights into feerate percentiles for recent blocks. +- **Multi-Network Support**: Connect to multiple Bitcoin Core nodes simultaneously (mainnet, testnet, signet, regtest). Each network gets its own collector thread and per-network database. Switch between networks from the UI without restarting. #### Architecture @@ -39,7 +40,7 @@ This project tracks `estimatesmartfee` from a Bitcoin Core node and compares tho - **Node.js**: 22+ #### 1. Configuration -- **Backend**: Copy `backend/rpc_config.ini.example` to `backend/rpc_config.ini` and provide RPC credentials. +- **Backend**: Copy `backend/rpc_config.ini.example` to `backend/rpc_config.ini` and add one `[RPC.<chain>]` section per node you want to connect to. Each section needs `URL`, `RPC_USER`, and `RPC_PASSWORD`. The chain is auto-detected via `getblockchaininfo`. Estimates are stored in per-network databases (`fee_analysis.db`, `testnet3/fee_analysis.db`, etc.). #### 2. Manual Startup **Backend:** diff --git a/backend/rpc_config.ini.example b/backend/rpc_config.ini.example index 5fb44d7..6bebb2f 100644 --- a/backend/rpc_config.ini.example +++ b/backend/rpc_config.ini.example @@ -1,4 +1,34 @@ -[RPC_INFO] -URL = -RPC_USER = -RPC_PASSWORD = +# Multi-network RPC configuration. +# Each [RPC.<chain>] section defines a connection to a Bitcoin Core node. +# The app validates each section by calling getblockchaininfo on startup. +# +# Supported chains: main, testnet, testnet4, signet, regtest +# Only include sections for nodes you actually run. 
+# +# Cookie auth (recommended): set RPC_USER=__cookie__ and RPC_PASSWORD to the +# contents of the node's .cookie file for that chain's datadir. + +[RPC.main] +URL = http://127.0.0.1:8332 +RPC_USER = +RPC_PASSWORD = + +# [RPC.testnet] +# URL = http://127.0.0.1:18332 +# RPC_USER = +# RPC_PASSWORD = + +# [RPC.testnet4] +# URL = http://127.0.0.1:48332 +# RPC_USER = +# RPC_PASSWORD = + +# [RPC.signet] +# URL = http://127.0.0.1:38332 +# RPC_USER = +# RPC_PASSWORD = + +# [RPC.regtest] +# URL = http://127.0.0.1:18443 +# RPC_USER = +# RPC_PASSWORD = diff --git a/backend/src/app.py b/backend/src/app.py index 416a233..c0dfae4 100644 --- a/backend/src/app.py +++ b/backend/src/app.py @@ -17,66 +17,79 @@ def create_app(): app = Flask(__name__) - # NOTE: Configure x_for=1 to match your actual proxy depth. - # Without this, X-Forwarded-For spoofing can defeat IP-based limiting. app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1) CORS(app) - # --------------------------------------------------------------------------- - # Rate limiting - # --------------------------------------------------------------------------- - # Uses the real client IP (respects ProxyFix above). - # Default: 200 requests/day, 60/hour applied to every endpoint unless - # overridden with a per-route @limiter.limit() decorator below. - # --------------------------------------------------------------------------- limiter = Limiter( key_func=get_remote_address, app=app, default_limits=["10000 per day", "1000 per hour"], - # Store state in memory by default. 
For multi-worker/multi-process - # deployments swap this for a Redis URI: - # storage_uri="redis://localhost:6379" storage_uri="memory://", - # Return 429 JSON instead of HTML when limit is hit headers_enabled=True, # adds X-RateLimit-* headers to responses + headers_enabled=True, ) - db_service.init_db() - collector_service.start_background_collector() + # Initialise a DB for each configured chain + for chain in rpc_service.registry.chains(): + db_service.init_db(chain=chain) + + collector_service.start_background_collectors() + + # --------------------------------------------------------------------------- + # Helper — resolve ?chain= query param (defaults to first registered chain) + # --------------------------------------------------------------------------- + def _resolve_chain(): + chain = request.args.get("chain") + if chain and chain not in rpc_service.registry: + return None, (jsonify({"error": f"Unknown chain '{chain}'. Available: {rpc_service.registry.chains()}"}), 400) + return chain, None # --------------------------------------------------------------------------- # Routes # --------------------------------------------------------------------------- + @app.route("/networks", methods=['GET']) + @limiter.limit("100 per minute") + def networks(): + return jsonify(rpc_service.get_available_chains()) + @app.route("/fees/<int:target>/<mode>/<int:level>", methods=['GET']) - @limiter.limit("50 per minute") # estimatesmartfee is a node RPC call — keep it tight + @limiter.limit("50 per minute") def fees(target, mode, level): VALID_MODES = {"economical", "conservative", "unset"} if mode not in VALID_MODES: return jsonify({"error": f"Invalid mode '{mode}'. 
Must be one of: {', '.join(VALID_MODES)}"}), 400 + chain, err = _resolve_chain() + if err: + return err try: - result = rpc_service.estimate_smart_fee(conf_target=target, mode=mode, verbosity_level=level) + result = rpc_service.estimate_smart_fee(conf_target=target, mode=mode, verbosity_level=level, chain=chain) return jsonify(result) except Exception as e: logger.error(f"/fees RPC failed: {e}", exc_info=True) return jsonify({"error": "Internal server error"}), 500 @app.route("/mempool-diagram", methods=['GET']) - @limiter.limit("50 per minute") # expensive computation — strict cap + @limiter.limit("50 per minute") def mempool_diagram(): + chain, err = _resolve_chain() + if err: + return err try: - result = rpc_service.get_mempool_feerate_diagram_analysis() + result = rpc_service.get_mempool_feerate_diagram_analysis(chain=chain) return jsonify(result) except Exception as e: logger.error(f"Mempool diagram RPC failed: {e}", exc_info=True) return jsonify({"error": "Internal server error"}), 500 @app.route("/performance-data/<int:start_block>/", methods=['GET']) - @limiter.limit("50 per minute") # hits DB + RPC + @limiter.limit("50 per minute") def get_performance_data(start_block): target = request.args.get('target', default=2, type=int) + chain, err = _resolve_chain() + if err: + return err try: - data = rpc_service.get_performance_data(start_height=start_block, count=100, target=target) + data = rpc_service.get_performance_data(start_height=start_block, count=100, target=target, chain=chain) return jsonify(data) except Exception as e: logger.error(f"/performance-data RPC failed: {e}", exc_info=True) @@ -86,19 +99,29 @@ def get_performance_data(start_block): @limiter.limit("50 per minute") def get_local_fees_sum(start_block): target = request.args.get('target', default=2, type=int) + chain, err = _resolve_chain() + if err: + return err try: - data = rpc_service.calculate_local_summary(target=target) + data = rpc_service.calculate_local_summary(target=target, chain=chain) return 
jsonify(data) except Exception as e: logger.error(f"/fees-sum failed: {e}", exc_info=True) return jsonify({"error": "Internal server error"}), 500 @app.route("/blockcount", methods=['GET']) - @limiter.limit("100 per minute") # cheap call, slightly more relaxed + @limiter.limit("100 per minute") def block_count(): + chain, err = _resolve_chain() + if err: + return err try: - result = rpc_service.get_block_count() - return jsonify({"blockcount": result}) + info = rpc_service.get_blockchain_info(chain=chain) + return jsonify({ + "blockcount": info["blockcount"], + "chain": info["chain"], + "chain_display": info["chain_display"], + }) except Exception as e: logger.error(f"/blockcount RPC failed: {e}", exc_info=True) return jsonify({"error": "Internal server error"}), 500 @@ -113,7 +136,6 @@ def page_not_found(error): @app.errorhandler(429) def rate_limit_exceeded(error): - # error.description is the limit string e.g. "30 per 1 minute" logger.warning(f"Rate limit exceeded from {get_remote_address()}: {error.description}") return jsonify({ "error": "Too many requests", diff --git a/backend/src/services/collector_service.py b/backend/src/services/collector_service.py index 4f4cd36..b0e1368 100644 --- a/backend/src/services/collector_service.py +++ b/backend/src/services/collector_service.py @@ -5,44 +5,53 @@ import services.database_service as db_service logger = logging.getLogger("collector") -_collector_started = False +_collectors_started = False -def run_collector(): - logger.info("Starting high-resolution fee estimate collector (7s interval)...") - # 1 and 2 are the same, so we only poll 2 + +def _run_collector_for_chain(chain: str): + """Polling loop for a single chain. 
Runs forever in a daemon thread.""" + client = rpc_service.get_client(chain) + display = rpc_service.CHAIN_DISPLAY_NAMES.get(chain, chain.upper()) + logger.info(f"[Collector:{display}] Starting (7s interval)...") targets = [2, 7, 144] while True: start_time = time.time() try: - current_height = rpc_service.get_block_count() - + current_height = client.get_block_count() for t in targets: try: - res = rpc_service.estimate_smart_fee(t, "unset", 1) + res = client.estimate_smart_fee(t, "unset", 1) if "feerate_sat_per_vb" in res: rate = res["feerate_sat_per_vb"] - db_service.save_estimate(current_height, t, rate) - # Log as collected for the target - logger.info(f"[Collector] SAVED: target={t} height={current_height} rate={rate:.2f} sat/vB") + db_service.save_estimate(current_height, t, rate, chain=chain) + logger.info( + f"[Collector:{display}] SAVED: target={t} height={current_height} rate={rate:.2f} sat/vB" + ) except Exception as e: - logger.error(f"[Collector] Failed to collect for target {t}: {e}") - + logger.error(f"[Collector:{display}] Failed for target {t}: {e}") except Exception as e: - logger.error(f"[Collector] Loop error: {e}") + logger.error(f"[Collector:{display}] Loop error: {e}") elapsed = time.time() - start_time - # Interval between request should be 7 seconds. 
- # (https://bitcoin.stackexchange.com/questions/125776/how-long-does-it-take-for-a-transaction-to-propagate-through-the-network) sleep_time = max(0, 7 - elapsed) time.sleep(sleep_time) -def start_background_collector(): - global _collector_started - if _collector_started: - logger.warning("Collector already running, skipping.") + +def start_background_collectors(): + """Spawn one collector thread per registered chain.""" + global _collectors_started + if _collectors_started: + logger.warning("Collectors already running, skipping.") return - _collector_started = True - thread = threading.Thread(target=run_collector, daemon=True) - thread.start() - return thread + _collectors_started = True + + chains = rpc_service.registry.chains() + for chain in chains: + thread = threading.Thread(target=_run_collector_for_chain, args=(chain,), daemon=True) + thread.start() + logger.info(f"Collector thread started for {chain}") + + +# Keep old name as alias for backward compat (tests, existing callers) +start_background_collector = start_background_collectors diff --git a/backend/src/services/database_service.py b/backend/src/services/database_service.py index 8fd94d6..98028ee 100644 --- a/backend/src/services/database_service.py +++ b/backend/src/services/database_service.py @@ -4,16 +4,37 @@ logger = logging.getLogger(__name__) -DB_PATH = os.environ.get( - "DB_PATH", - os.path.join(os.path.dirname(os.path.abspath(__file__)), "fee_analysis.db") +_BASE_DB_DIR = os.environ.get( + "DB_DIR", + os.path.dirname(os.path.abspath(__file__)) ) +DB_FILENAME = "fee_analysis.db" + MAX_RANGE_BLOCKS = 10_000 # safety cap on get_estimates_in_range -def init_db(): +# Bitcoin Core–style subdirectories per network +CHAIN_DIR_MAP = { + "main": "", + "test": "testnet3", + "testnet4": "testnet4", + "signet": "signet", + "regtest": "regtest", +} + + +def get_db_path(chain: str = "main") -> str: + """Return the DB file path for the given chain, creating parent dirs if needed.""" + subdir = 
CHAIN_DIR_MAP.get(chain, chain) + directory = os.path.join(_BASE_DB_DIR, subdir) if subdir else _BASE_DB_DIR + os.makedirs(directory, exist_ok=True) + return os.path.join(directory, DB_FILENAME) + + +def init_db(chain: str = "main"): + db_path = get_db_path(chain) try: - with sqlite3.connect(DB_PATH) as conn: + with sqlite3.connect(db_path) as conn: cursor = conn.cursor() cursor.execute(''' CREATE TABLE IF NOT EXISTS fee_estimates ( @@ -27,44 +48,44 @@ def init_db(): ''') cursor.execute('CREATE INDEX IF NOT EXISTS idx_poll_height ON fee_estimates(poll_height)') cursor.execute('CREATE INDEX IF NOT EXISTS idx_target ON fee_estimates(target)') - # Composite index for the most common query pattern (poll_height + target together) cursor.execute(''' CREATE INDEX IF NOT EXISTS idx_poll_height_target ON fee_estimates(poll_height, target) ''') conn.commit() - logger.info(f"Database initialised at {DB_PATH}") + logger.info(f"Database initialised at {db_path}") except sqlite3.Error as e: logger.error(f"Failed to initialise database: {e}", exc_info=True) raise -def save_estimate(poll_height, target, feerate): +def save_estimate(poll_height, target, feerate, chain="main"): expected_height = poll_height + target + db_path = get_db_path(chain) try: - with sqlite3.connect(DB_PATH) as conn: + with sqlite3.connect(db_path) as conn: cursor = conn.cursor() cursor.execute(''' INSERT INTO fee_estimates (poll_height, target, estimate_feerate, expected_height) VALUES (?, ?, ?, ?) 
''', (poll_height, target, feerate, expected_height)) conn.commit() - logger.debug(f"Saved estimate: poll_height={poll_height}, target={target}, feerate={feerate}") + logger.debug(f"Saved estimate: poll_height={poll_height}, target={target}, feerate={feerate}, chain={chain}") except sqlite3.Error as e: logger.error(f"Failed to save estimate (poll_height={poll_height}, target={target}): {e}", exc_info=True) raise -def get_estimates_in_range(start_height, end_height, target=2): - # Enforce a max block range to prevent runaway queries +def get_estimates_in_range(start_height, end_height, target=2, chain="main"): if end_height - start_height > MAX_RANGE_BLOCKS: logger.warning( f"Requested range [{start_height}, {end_height}] exceeds MAX_RANGE_BLOCKS={MAX_RANGE_BLOCKS}. Clamping." ) end_height = start_height + MAX_RANGE_BLOCKS + db_path = get_db_path(chain) try: - with sqlite3.connect(DB_PATH) as conn: + with sqlite3.connect(db_path) as conn: conn.row_factory = sqlite3.Row cursor = conn.cursor() cursor.execute(''' @@ -84,9 +105,10 @@ def get_estimates_in_range(start_height, end_height, target=2): raise -def get_db_height_range(target=2): +def get_db_height_range(target=2, chain="main"): + db_path = get_db_path(chain) try: - with sqlite3.connect(DB_PATH) as conn: + with sqlite3.connect(db_path) as conn: cursor = conn.cursor() cursor.execute( 'SELECT MIN(poll_height), MAX(poll_height) FROM fee_estimates WHERE target = ?', @@ -97,7 +119,6 @@ def get_db_height_range(target=2): if row and row[0] is None: logger.debug(f"No data in DB for target={target}") - # Return raw tuple — preserves existing caller contract return row except sqlite3.Error as e: logger.error(f"Failed to get DB height range: {e}", exc_info=True) diff --git a/backend/src/services/rpc_service.py b/backend/src/services/rpc_service.py index e87819a..18307e7 100644 --- a/backend/src/services/rpc_service.py +++ b/backend/src/services/rpc_service.py @@ -10,258 +10,410 @@ logger = logging.getLogger("rpc_service") 
-# --------------------------------------------------------------------------- -# Config — walk up from this file's directory until rpc_config.ini is found, -# or use the RPC_CONFIG_PATH env var to set it explicitly. -# --------------------------------------------------------------------------- -def _find_config(filename: str = "rpc_config.ini") -> Optional[str]: - if env_path := os.environ.get("RPC_CONFIG_PATH"): - return env_path - directory = os.path.dirname(os.path.abspath(__file__)) - # Walk up a maximum of 5 levels to find the config file - for _ in range(5): - candidate = os.path.join(directory, filename) - if os.path.isfile(candidate): - return candidate - directory = os.path.dirname(directory) - return None +CHAIN_DISPLAY_NAMES = { + "main": "MAINNET", "test": "TESTNET", "testnet4": "TESTNET4", + "signet": "SIGNET", "regtest": "REGTEST", +} -_CONFIG_PATH = _find_config() -if _CONFIG_PATH: - logger.debug(f"Loading RPC config from: {_CONFIG_PATH}") -else: - logger.warning("rpc_config.ini not found — relying solely on environment variables.") +DEFAULT_TIMEOUT_SECONDS = 30 -_config = configparser.ConfigParser() -if _CONFIG_PATH: - _config.read(_CONFIG_PATH) +# Shared counter across all clients for JSON-RPC IDs +_rpc_id_counter = itertools.count(1) -def _get_config_val(section: str, option: str, default: Optional[str] = None) -> Optional[str]: - try: - return _config.get(section, option) - except (configparser.NoSectionError, configparser.NoOptionError): - return default + +def _clamp_target(target: int) -> int: + """Bitcoin Core treats targets <= 1 the same as 2.""" + return max(2, target) # --------------------------------------------------------------------------- -# Credentials — private, validated eagerly at import time +# RpcClient — one instance per Bitcoin Core node # --------------------------------------------------------------------------- -_URL = os.environ.get("RPC_URL") or _get_config_val("RPC_INFO", "URL") -_RPCUSER = os.environ.get("RPC_USER") 
or _get_config_val("RPC_INFO", "RPC_USER") -_RPCPASSWORD = os.environ.get("RPC_PASSWORD") or _get_config_val("RPC_INFO", "RPC_PASSWORD") -if not _URL: - raise EnvironmentError( - "Bitcoin RPC URL is not configured. " - "Set the RPC_URL environment variable or add URL under [RPC_INFO] in rpc_config.ini." - ) +class RpcClient: + """Stateful RPC connection to a single Bitcoin Core node.""" + + def __init__(self, url: str, user: Optional[str] = None, password: Optional[str] = None): + self._url = url + self._user = user + self._password = password + self._session = requests.Session() + self._chain: Optional[str] = None + + @property + def chain(self) -> str: + if self._chain is None: + info = self.get_blockchain_info() + self._chain = info["chain"] + return self._chain + + @property + def chain_display(self) -> str: + return CHAIN_DISPLAY_NAMES.get(self.chain, self.chain.upper()) + + def rpc_call(self, method: str, params: List[Any]) -> Any: + payload = json.dumps({ + "method": method, + "params": params, + "id": next(_rpc_id_counter), + }) + auth = (self._user, self._password) if (self._user or self._password) else None + try: + response = self._session.post( + self._url, data=payload, auth=auth, timeout=DEFAULT_TIMEOUT_SECONDS, + ) + data = response.json() + if data.get("error"): + raise RuntimeError(f"RPC Error ({method}): {data['error']}") + return data.get("result") + except RuntimeError: + raise + except Exception as e: + raise RuntimeError(f"RPC call '{method}' failed: {type(e).__name__}") from e + + # Keep _rpc_call as alias so tests can patch it + _rpc_call = rpc_call + + def get_block_count(self) -> int: + return self.rpc_call("getblockcount", []) + + def get_blockchain_info(self) -> Dict[str, Any]: + result = self.rpc_call("getblockchaininfo", []) + if not result: + return {"chain": "main", "blockcount": self.get_block_count()} + chain = result.get("chain", "main") + blocks = result.get("blocks", self.get_block_count()) + display = 
CHAIN_DISPLAY_NAMES.get(chain, chain.upper()) + return {"chain": chain, "chain_display": display, "blockcount": blocks} + + def estimate_smart_fee(self, conf_target: int, mode: str = "unset", verbosity_level: int = 2) -> Dict[str, Any]: + effective_target = _clamp_target(conf_target) + result = self.rpc_call("estimatesmartfee", [effective_target, mode]) + if result and "feerate" in result: + result["feerate_sat_per_vb"] = result["feerate"] * 100_000 + + if result is not None: + result["chain"] = self.chain + result["chain_display"] = self.chain_display -DEFAULT_TIMEOUT_SECONDS = 30 + try: + result["mempool_health_statistics"] = self.get_mempool_health_statistics() + except Exception as e: + logger.error(f"Failed to include health stats: {e}") -# Reuse TCP connection across all RPC calls -_session = requests.Session() + return result -# Monotonically increasing JSON-RPC request IDs -_rpc_id_counter = itertools.count(1) + def get_mempool_health_statistics(self) -> List[Dict[str, Any]]: + current_height = self.get_block_count() + stats = [] + mempool_diagram = self.rpc_call("getmempoolfeeratediagram", []) + total_mempool_weight = mempool_diagram[-1]["weight"] if mempool_diagram else 0 + for h in range(current_height - 4, current_height + 1): + try: + b = self.get_single_block_stats(h) + weight = b.get("total_weight", 0) + stats.append({ + "block_height": h, + "block_weight": weight, + "mempool_txs_weight": total_mempool_weight, + "ratio": min(1.0, total_mempool_weight / 4_000_000), + }) + except Exception: + continue + return stats + + @lru_cache(maxsize=2000) + def _get_single_block_stats_cached(self, height: int) -> str: + result = self.rpc_call("getblockstats", [ + height, ["height", "feerate_percentiles", "minfeerate", "maxfeerate", "total_weight"], + ]) + return json.dumps(result) + + def get_single_block_stats(self, height: int) -> Dict[str, Any]: + return json.loads(self._get_single_block_stats_cached(height)) + + def get_mempool_feerate_diagram_analysis(self) 
-> Dict[str, Any]: + raw_points = self.rpc_call("getmempoolfeeratediagram", []) + if not raw_points: + return {"raw": [], "windows": {}} + + BLOCK_WEIGHT = 4_000_000 + max_weight = raw_points[-1]["weight"] + + segments = [] + for i, p in enumerate(raw_points): + if i == 0: + fr = (p["fee"] / p["weight"]) * 400_000_000 if p["weight"] > 0 else 0 + else: + prev = raw_points[i - 1] + dw = p["weight"] - prev["weight"] + df = p["fee"] - prev["fee"] + fr = (df / dw) * 400_000_000 if dw > 0 else 0 + segments.append({"w": p["weight"], "fr": fr}) + + def _feerate_at_weight(w_target: float) -> float: + for seg in segments: + if seg["w"] >= w_target: + return seg["fr"] + return segments[-1]["fr"] if segments else 0 + + def _window_percentiles(weight_limit: int) -> Dict[str, float]: + actual_limit = min(weight_limit, max_weight) + return { + str(int(p * 100)): _feerate_at_weight(p * actual_limit) + for p in (0.05, 0.25, 0.50, 0.75, 0.95) + } + + windows = { + "1": _window_percentiles(BLOCK_WEIGHT), + "2": _window_percentiles(BLOCK_WEIGHT * 2), + "3": _window_percentiles(BLOCK_WEIGHT * 3), + "all": _window_percentiles(max_weight), + } -# --------------------------------------------------------------------------- -# Internal helpers -# --------------------------------------------------------------------------- + return { + "raw": raw_points, + "windows": windows, + "total_weight": max_weight, + "total_fee": raw_points[-1]["fee"], + } -def _clamp_target(target: int) -> int: - """Bitcoin Core treats targets ≤ 1 the same as 2.""" - return max(2, target) + def get_performance_data(self, start_height: int, count: int = 100, target: int = 2) -> Dict[str, Any]: + import services.database_service as db_service + effective_target = _clamp_target(target) + db_rows = db_service.get_estimates_in_range( + start_height, start_height + count, effective_target, chain=self.chain, + ) -def _rpc_call(method: str, params: List[Any]) -> Any: - payload = json.dumps({ - "method": method, - "params": 
params, - "id": next(_rpc_id_counter), - }) - auth = (_RPCUSER, _RPCPASSWORD) if (_RPCUSER or _RPCPASSWORD) else None - try: - response = _session.post(_URL, data=payload, auth=auth, timeout=DEFAULT_TIMEOUT_SECONDS) - data = response.json() - if data.get("error"): - raise RuntimeError(f"RPC Error ({method}): {data['error']}") - return data.get("result") - except RuntimeError: - raise - except Exception as e: - # Wrap transport-level errors without re-logging — callers decide log level - raise RuntimeError(f"RPC call '{method}' failed: {type(e).__name__}") from e + latest_estimates_map = {row["poll_height"]: row["estimate_feerate"] for row in db_rows} + estimates = [{"height": h, "rate": latest_estimates_map[h]} for h in sorted(latest_estimates_map)] + blocks = [] + for h in range(start_height, start_height + count): + try: + b = self.get_single_block_stats(h) + p = b.get("feerate_percentiles", [0, 0, 0, 0, 0]) + blocks.append({"height": h, "low": p[0], "high": p[4]}) + except Exception: + logger.debug(f"Skipping block stats for height {h} — RPC unavailable") + continue -# --------------------------------------------------------------------------- -# Block stats — cached, returns a copy to prevent cache corruption -# --------------------------------------------------------------------------- + return {"blocks": blocks, "estimates": estimates} -@lru_cache(maxsize=2000) -def _get_single_block_stats_cached(height: int) -> tuple: - """ - Returns a frozen (JSON-serialised) snapshot so the lru_cache holds - immutable data. Use get_single_block_stats() for normal access. 
- """ - result = _rpc_call("getblockstats", [height, ["height", "feerate_percentiles", "minfeerate", "maxfeerate", "total_weight"]]) - return json.dumps(result) # freeze as string + def calculate_local_summary(self, target: int = 2) -> Dict[str, Any]: + import services.database_service as db_service + effective_target = _clamp_target(target) + current_h = self.get_block_count() + db_rows = db_service.get_estimates_in_range( + current_h - 1000, current_h, effective_target, chain=self.chain, + ) -def get_single_block_stats(height: int) -> Dict[str, Any]: - """Returns a fresh dict each call — safe to mutate without corrupting the cache.""" - return json.loads(_get_single_block_stats_cached(height)) + total = over = under = within = 0 + for row in db_rows: + poll_h = row["poll_height"] + target_val = row["target"] + est = row["estimate_feerate"] + window_end = poll_h + target_val -# --------------------------------------------------------------------------- -# Public RPC wrappers -# --------------------------------------------------------------------------- + if window_end > current_h: + continue + + total += 1 + is_under = True + is_over = False + + for h in range(poll_h + 1, window_end + 1): + try: + b = self.get_single_block_stats(h) + p = b.get("feerate_percentiles", [0, 0, 0, 0, 0]) + if est >= p[0]: + is_under = False + if est > p[4]: + is_over = True + except Exception: + logger.debug(f"Skipping block {h} in summary calculation — RPC unavailable") + continue + + if is_under: + under += 1 + elif is_over: + over += 1 + else: + within += 1 -def get_block_count() -> int: - return _rpc_call("getblockcount", []) - -def estimate_smart_fee(conf_target: int, mode: str = "unset", verbosity_level: int = 2) -> Dict[str, Any]: - effective_target = _clamp_target(conf_target) - result = _rpc_call("estimatesmartfee", [effective_target, mode, verbosity_level]) - if result and "feerate" in result: - # feerate is BTC/kVB → sat/vB: × 1e8 (BTC→sat) ÷ 1e3 (kVB→vB) = × 1e5 - 
result["feerate_sat_per_vb"] = result["feerate"] * 100_000 - - return result - -def get_mempool_feerate_diagram_analysis() -> Dict[str, Any]: - raw_points = _rpc_call("getmempoolfeeratediagram", []) - if not raw_points: - return {"raw": [], "windows": {}} - - # Weight of a standard full block in weight units - BLOCK_WEIGHT = 4_000_000 - max_weight = raw_points[-1]["weight"] - - # Pre-calculate per-segment feerates - # Conversion: (fee_BTC / weight_WU) × 4e8 = sat/vB - # (1 vB = 4 WU; 1 BTC = 1e8 sat → factor = 1e8 / 4 = 25_000_000... but - # raw_points["fee"] is in BTC and weight in WU, so sat/vB = fee/weight × 4e8 / 4 - # = fee/weight × 1e8 — however Bitcoin Core actually returns fee in BTC and weight - # in WU where 1 vB = 4 WU, so sat/vB = (fee_BTC × 1e8) / (weight_WU / 4) - # = fee_BTC × 4e8 / weight_WU. Factor 400_000_000 is correct.) - segments = [] - for i, p in enumerate(raw_points): - if i == 0: - fr = (p["fee"] / p["weight"]) * 400_000_000 if p["weight"] > 0 else 0 - else: - prev = raw_points[i - 1] - dw = p["weight"] - prev["weight"] - df = p["fee"] - prev["fee"] - fr = (df / dw) * 400_000_000 if dw > 0 else 0 - segments.append({"w": p["weight"], "fr": fr}) - - def _feerate_at_weight(w_target: float) -> float: - for seg in segments: - if seg["w"] >= w_target: - return seg["fr"] - return segments[-1]["fr"] if segments else 0 - - def _window_percentiles(weight_limit: int) -> Dict[str, float]: - actual_limit = min(weight_limit, max_weight) return { - str(int(p * 100)): _feerate_at_weight(p * actual_limit) - for p in (0.05, 0.25, 0.50, 0.75, 0.95) + "total": total, + "within_val": within, + "within_perc": within / total if total > 0 else 0, + "overpayment_val": over, + "overpayment_perc": over / total if total > 0 else 0, + "underpayment_val": under, + "underpayment_perc": under / total if total > 0 else 0, } - windows = { - "1": _window_percentiles(BLOCK_WEIGHT), - "2": _window_percentiles(BLOCK_WEIGHT * 2), - "3": _window_percentiles(BLOCK_WEIGHT * 3), - 
"all": _window_percentiles(max_weight), - } - return { - "raw": raw_points, - "windows": windows, - "total_weight": max_weight, - "total_fee": raw_points[-1]["fee"], - } +# --------------------------------------------------------------------------- +# RpcRegistry — loads config, creates one RpcClient per [RPC.] section +# --------------------------------------------------------------------------- + +def _find_config(filename: str = "rpc_config.ini") -> Optional[str]: + if env_path := os.environ.get("RPC_CONFIG_PATH"): + return env_path + directory = os.path.dirname(os.path.abspath(__file__)) + for _ in range(5): + candidate = os.path.join(directory, filename) + if os.path.isfile(candidate): + return os.path.abspath(candidate) + directory = os.path.dirname(directory) + return None + + +class RpcRegistry: + """Registry of RpcClient instances keyed by chain name.""" + + def __init__(self): + self._clients: Dict[str, RpcClient] = {} + self._default_chain: Optional[str] = None + + @property + def default_chain(self) -> str: + if self._default_chain: + return self._default_chain + if self._clients: + return next(iter(self._clients)) + raise RuntimeError("No RPC clients configured") + + def add_client(self, chain: str, client: RpcClient): + self._clients[chain] = client + if self._default_chain is None: + self._default_chain = chain + + def get_client(self, chain: Optional[str] = None) -> RpcClient: + key = chain or self.default_chain + if key not in self._clients: + raise ValueError(f"No RPC client for chain '{key}'. 
Available: {list(self._clients.keys())}") + return self._clients[key] + + def available_chains(self) -> List[Dict[str, str]]: + result = [] + for chain, client in self._clients.items(): + display = CHAIN_DISPLAY_NAMES.get(chain, chain.upper()) + result.append({"chain": chain, "chain_display": display}) + return result + + def chains(self) -> List[str]: + return list(self._clients.keys()) + + def __contains__(self, chain: str) -> bool: + return chain in self._clients + + def __len__(self) -> int: + return len(self._clients) + + +def _build_registry() -> RpcRegistry: + """Build the registry from config file and/or environment variables.""" + registry = RpcRegistry() + config_path = _find_config() + config = configparser.ConfigParser() + if config_path: + config.read(config_path) + logger.debug(f"Loading RPC config from: {config_path}") + + # New multi-section format: [RPC.main], [RPC.test], etc. + for section in config.sections(): + if section.startswith("RPC."): + chain_hint = section.split(".", 1)[1] + url = config.get(section, "URL", fallback="").strip() + user = config.get(section, "RPC_USER", fallback="").strip() + password = config.get(section, "RPC_PASSWORD", fallback="").strip() + if not url: + logger.warning(f"Skipping [{section}]: no URL configured") + continue + client = RpcClient(url, user or None, password or None) + try: + actual_chain = client.chain + if chain_hint != actual_chain: + logger.warning( + f"[{section}] config says '{chain_hint}' but node reports '{actual_chain}'; using '{actual_chain}'" + ) + registry.add_client(actual_chain, client) + logger.info(f"Registered RPC client: {CHAIN_DISPLAY_NAMES.get(actual_chain, actual_chain)} ({url})") + except Exception as e: + logger.warning(f"Skipping [{section}] ({url}): {e}") + + if len(registry) == 0: + raise EnvironmentError( + "No RPC connections configured. " + "Add [RPC.<chain>] sections to rpc_config.ini." 
+ ) + + return registry + + +# Module-level singleton — lazy so tests can patch _build_registry before first access +_registry: Optional[RpcRegistry] = None + + +def _get_registry() -> RpcRegistry: + global _registry + if _registry is None: + _registry = _build_registry() + return _registry + + +# Public alias for direct access (e.g. registry.chains()) +class _RegistryProxy: + """Proxy that defers registry creation until first attribute access.""" + def __getattr__(self, name): + return getattr(_get_registry(), name) + def __contains__(self, item): + return item in _get_registry() + def __len__(self): + return len(_get_registry()) + +registry = _RegistryProxy() # --------------------------------------------------------------------------- -# Performance / summary logic +# Convenience functions — delegate to default or specified client # --------------------------------------------------------------------------- -def get_performance_data(start_height: int, count: int = 100, target: int = 2) -> Dict[str, Any]: - import services.database_service as db_service # late import — breaks circular dep +def get_client(chain: Optional[str] = None) -> RpcClient: + return _get_registry().get_client(chain) - effective_target = _clamp_target(target) - db_rows = db_service.get_estimates_in_range(start_height, start_height + count, effective_target) - # Deduplicate to latest estimate per height (dict preserves insertion order in Py3.7+) - latest_estimates_map = {row["poll_height"]: row["estimate_feerate"] for row in db_rows} - estimates = [{"height": h, "rate": latest_estimates_map[h]} for h in sorted(latest_estimates_map)] +def get_current_chain() -> str: + return registry.default_chain - blocks = [] - for h in range(start_height, start_height + count): - try: - b = get_single_block_stats(h) - p = b.get("feerate_percentiles", [0, 0, 0, 0, 0]) - blocks.append({"height": h, "low": p[0], "high": p[4]}) - except Exception: - logger.debug(f"Skipping block stats for height {h} — RPC 
unavailable") - continue - return {"blocks": blocks, "estimates": estimates} +def get_available_chains() -> List[Dict[str, str]]: + return registry.available_chains() -def calculate_local_summary(target: int = 2) -> Dict[str, Any]: - import services.database_service as db_service # late import — breaks circular dep +def get_block_count(chain: Optional[str] = None) -> int: + return get_client(chain).get_block_count() - effective_target = _clamp_target(target) - current_h = get_block_count() - db_rows = db_service.get_estimates_in_range(current_h - 1000, current_h, effective_target) +def get_blockchain_info(chain: Optional[str] = None) -> Dict[str, Any]: + return get_client(chain).get_blockchain_info() - total = 0 - over = 0 - under = 0 - within = 0 - for row in db_rows: - poll_h = row["poll_height"] - target_val = row["target"] - est = row["estimate_feerate"] - window_end = poll_h + target_val +def estimate_smart_fee(conf_target: int, mode: str = "unset", verbosity_level: int = 2, chain: Optional[str] = None) -> Dict[str, Any]: + return get_client(chain).estimate_smart_fee(conf_target, mode, verbosity_level) - if window_end > current_h: - continue - total += 1 - is_under = True - is_over = False +def get_mempool_feerate_diagram_analysis(chain: Optional[str] = None) -> Dict[str, Any]: + return get_client(chain).get_mempool_feerate_diagram_analysis() + + +def get_performance_data(start_height: int, count: int = 100, target: int = 2, chain: Optional[str] = None) -> Dict[str, Any]: + return get_client(chain).get_performance_data(start_height, count, target) - for h in range(poll_h + 1, window_end + 1): - try: - b = get_single_block_stats(h) - p = b.get("feerate_percentiles", [0, 0, 0, 0, 0]) - if est >= p[0]: - is_under = False - if est > p[4]: - is_over = True - except Exception: - logger.debug(f"Skipping block {h} in summary calculation — RPC unavailable") - continue - if is_under: - under += 1 - elif is_over: - over += 1 - else: - within += 1 - - return { - "total": 
total, - "within_val": within, - "within_perc": within / total if total > 0 else 0, - "overpayment_val": over, - "overpayment_perc": over / total if total > 0 else 0, - "underpayment_val": under, - "underpayment_perc": under / total if total > 0 else 0, - } +def calculate_local_summary(target: int = 2, chain: Optional[str] = None) -> Dict[str, Any]: + return get_client(chain).calculate_local_summary(target) diff --git a/backend/tests/helpers.py b/backend/tests/helpers.py index a7028d8..8cf651f 100644 --- a/backend/tests/helpers.py +++ b/backend/tests/helpers.py @@ -1,13 +1,28 @@ import os import sys -from unittest.mock import patch +from unittest.mock import patch, MagicMock sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../src'))) +def _make_mock_registry(): + """Create a mock registry that claims to have only 'main' chain.""" + reg = MagicMock() + reg.chains.return_value = ["main"] + reg.available_chains.return_value = [{"chain": "main", "chain_display": "MAINNET"}] + reg.__contains__ = lambda self, x: x == "main" + reg.__len__ = lambda self: 1 + return reg + + def make_app(): """Create a Flask test app with all side effects patched out.""" - with patch('services.database_service.init_db', return_value=None), \ + mock_reg = _make_mock_registry() + + with patch('services.rpc_service._registry', mock_reg), \ + patch('services.rpc_service._get_registry', return_value=mock_reg), \ + patch('services.database_service.init_db', return_value=None), \ + patch('services.collector_service.start_background_collectors', return_value=None), \ patch('services.collector_service.start_background_collector', return_value=None): from app import create_app app = create_app() diff --git a/backend/tests/test_app.py b/backend/tests/test_app.py index e7a3781..7b4323c 100644 --- a/backend/tests/test_app.py +++ b/backend/tests/test_app.py @@ -1,5 +1,5 @@ import unittest -from unittest.mock import patch +from unittest.mock import patch, MagicMock from helpers 
import make_app @@ -8,21 +8,86 @@ class TestApp(unittest.TestCase): def setUp(self): self.client = make_app().test_client() + # --- /networks ---------------------------------------------------------- + + @patch('services.rpc_service.get_available_chains', return_value=[ + {"chain": "main", "chain_display": "MAINNET"}, + {"chain": "test", "chain_display": "TESTNET"}, + ]) + def test_networks_returns_available_chains(self, _): + r = self.client.get('/networks') + self.assertEqual(r.status_code, 200) + chains = r.json + self.assertEqual(len(chains), 2) + self.assertEqual(chains[0]['chain'], 'main') + self.assertEqual(chains[1]['chain'], 'test') + + @patch('services.rpc_service.get_available_chains', return_value=[ + {"chain": "signet", "chain_display": "SIGNET"}, + {"chain": "testnet4", "chain_display": "TESTNET4"}, + {"chain": "regtest", "chain_display": "REGTEST"}, + ]) + def test_networks_returns_all_configured_chains(self, _): + r = self.client.get('/networks') + self.assertEqual(r.status_code, 200) + chain_names = [c['chain'] for c in r.json] + self.assertEqual(chain_names, ['signet', 'testnet4', 'regtest']) + # --- /blockcount -------------------------------------------------------- - @patch('services.rpc_service.get_block_count', return_value=800000) + @patch('services.rpc_service.get_blockchain_info', return_value={ + "blockcount": 800000, "chain": "main", "chain_display": "MAINNET" + }) def test_block_count_success(self, _): r = self.client.get('/blockcount') self.assertEqual(r.status_code, 200) self.assertEqual(r.json['blockcount'], 800000) - - @patch('services.rpc_service.get_block_count', side_effect=RuntimeError("node down")) + self.assertEqual(r.json['chain'], 'main') + self.assertEqual(r.json['chain_display'], 'MAINNET') + + def test_block_count_returns_all_supported_networks(self): + cases = [ + ("main", "MAINNET"), + ("test", "TESTNET"), + ("testnet4", "TESTNET4"), + ("signet", "SIGNET"), + ("regtest", "REGTEST"), + ] + for chain, display in cases: + 
with self.subTest(chain=chain): + mock_reg = MagicMock() + mock_reg.__contains__ = lambda self, x: True + with patch('services.rpc_service._get_registry', return_value=mock_reg), \ + patch('services.rpc_service.get_blockchain_info', return_value={ + "blockcount": 800000, "chain": chain, "chain_display": display, + }): + r = self.client.get(f'/blockcount?chain={chain}') + self.assertEqual(r.status_code, 200) + self.assertEqual(r.json['chain'], chain) + self.assertEqual(r.json['chain_display'], display) + + @patch('services.rpc_service.get_blockchain_info', side_effect=RuntimeError("node down")) def test_block_count_error_does_not_leak(self, _): r = self.client.get('/blockcount') self.assertEqual(r.status_code, 500) self.assertNotIn('node down', r.json.get('error', '')) - # --- /fees/// -------------------------------------- + # --- ?chain= validation ------------------------------------------------- + + def test_unknown_chain_returns_400(self): + r = self.client.get('/blockcount?chain=fakenet') + self.assertEqual(r.status_code, 400) + self.assertIn('error', r.json) + + def test_unknown_chain_returns_400_on_fees(self): + r = self.client.get('/fees/2/economical/2?chain=fakenet') + self.assertEqual(r.status_code, 400) + + def test_unknown_chain_returns_400_on_mempool(self): + r = self.client.get('/mempool-diagram?chain=fakenet') + self.assertEqual(r.status_code, 400) + + # --- /fees/// with ?chain= ------------------------- @patch('services.rpc_service.estimate_smart_fee', return_value={"feerate": 0.0001, "blocks": 2}) def test_fees_success(self, _): @@ -30,6 +95,14 @@ def test_fees_success(self, _): self.assertEqual(r.status_code, 200) self.assertEqual(r.json['feerate'], 0.0001) + def test_fees_passes_chain_param(self): + mock_reg = MagicMock() + mock_reg.__contains__ = lambda self, x: True + with patch('services.rpc_service._get_registry', return_value=mock_reg), \ + patch('services.rpc_service.estimate_smart_fee', return_value={"feerate": 0.0001}) as mock: + 
self.client.get('/fees/2/economical/2?chain=signet') + mock.assert_called_once_with(conf_target=2, mode='economical', verbosity_level=2, chain='signet') + def test_fees_all_valid_modes_accepted(self): for mode in ('economical', 'conservative', 'unset'): with patch('services.rpc_service.estimate_smart_fee', return_value={"feerate": 0.0001}): @@ -79,7 +152,15 @@ def test_performance_data_success(self, _): def test_performance_data_passes_target_query_param(self): with patch('services.rpc_service.get_performance_data', return_value={"blocks": [], "estimates": []}) as mock: self.client.get('/performance-data/800000/?target=7') - mock.assert_called_once_with(start_height=800000, count=100, target=7) + mock.assert_called_once_with(start_height=800000, count=100, target=7, chain=None) + + def test_performance_data_passes_chain_param(self): + mock_reg = MagicMock() + mock_reg.__contains__ = lambda self, x: True + with patch('services.rpc_service._get_registry', return_value=mock_reg), \ + patch('services.rpc_service.get_performance_data', return_value={"blocks": [], "estimates": []}) as mock: + self.client.get('/performance-data/800000/?target=2&chain=regtest') + mock.assert_called_once_with(start_height=800000, count=100, target=2, chain='regtest') @patch('services.rpc_service.get_performance_data', side_effect=RuntimeError("db fail")) def test_performance_data_error_does_not_leak(self, _): @@ -105,7 +186,15 @@ def test_fees_sum_success(self, _): def test_fees_sum_passes_target_query_param(self): with patch('services.rpc_service.calculate_local_summary', return_value={"total": 0}) as mock: self.client.get('/fees-sum/800000/?target=144') - mock.assert_called_once_with(target=144) + mock.assert_called_once_with(target=144, chain=None) + + def test_fees_sum_passes_chain_param(self): + mock_reg = MagicMock() + mock_reg.__contains__ = lambda self, x: True + with patch('services.rpc_service._get_registry', return_value=mock_reg), \ + 
patch('services.rpc_service.calculate_local_summary', return_value={"total": 0}) as mock: + self.client.get('/fees-sum/800000/?target=2&chain=testnet4') + mock.assert_called_once_with(target=2, chain='testnet4') @patch('services.rpc_service.calculate_local_summary', side_effect=RuntimeError("summary fail")) def test_fees_sum_error_does_not_leak(self, _): diff --git a/backend/tests/test_database_service.py b/backend/tests/test_database_service.py index ffc1701..8d72ea1 100644 --- a/backend/tests/test_database_service.py +++ b/backend/tests/test_database_service.py @@ -1,4 +1,5 @@ import os +import shutil import sqlite3 import tempfile import unittest @@ -7,31 +8,32 @@ class TestDatabaseService(unittest.TestCase): def setUp(self): - """Each test gets its own isolated temporary SQLite DB.""" - self.tmp = tempfile.NamedTemporaryFile(suffix='.db', delete=False) - self.tmp.close() + """Each test gets its own isolated temporary directory as DB_DIR.""" + self.tmpdir = tempfile.mkdtemp() import services.database_service as db - self._orig_path = db.DB_PATH - db.DB_PATH = self.tmp.name + self._orig_base_dir = db._BASE_DB_DIR + db._BASE_DB_DIR = self.tmpdir self.db = db self.db.init_db() def tearDown(self): - self.db.DB_PATH = self._orig_path - os.unlink(self.tmp.name) + self.db._BASE_DB_DIR = self._orig_base_dir + shutil.rmtree(self.tmpdir, ignore_errors=True) # --- init_db ------------------------------------------------------------ def test_creates_table(self): - conn = sqlite3.connect(self.tmp.name) + db_path = self.db.get_db_path() + conn = sqlite3.connect(db_path) cursor = conn.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='fee_estimates'") self.assertIsNotNone(cursor.fetchone()) conn.close() def test_creates_all_indexes(self): - conn = sqlite3.connect(self.tmp.name) + db_path = self.db.get_db_path() + conn = sqlite3.connect(db_path) cursor = conn.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='index'") 
index_names = {row[0] for row in cursor.fetchall()} @@ -47,6 +49,62 @@ def test_is_idempotent(self): except Exception as e: self.fail(f"init_db raised on repeated call: {e}") + # --- per-network directory isolation ------------------------------------ + + def test_mainnet_db_at_root(self): + path = self.db.get_db_path("main") + self.assertEqual(path, os.path.join(self.tmpdir, "fee_analysis.db")) + + def test_testnet_db_in_testnet3(self): + self.db.init_db("test") + path = self.db.get_db_path("test") + self.assertEqual(path, os.path.join(self.tmpdir, "testnet3", "fee_analysis.db")) + self.assertTrue(os.path.isfile(path)) + + def test_signet_db_in_signet(self): + self.db.init_db("signet") + path = self.db.get_db_path("signet") + self.assertEqual(path, os.path.join(self.tmpdir, "signet", "fee_analysis.db")) + self.assertTrue(os.path.isfile(path)) + + def test_regtest_db_in_regtest(self): + self.db.init_db("regtest") + path = self.db.get_db_path("regtest") + self.assertEqual(path, os.path.join(self.tmpdir, "regtest", "fee_analysis.db")) + self.assertTrue(os.path.isfile(path)) + + def test_testnet4_db_in_testnet4(self): + self.db.init_db("testnet4") + path = self.db.get_db_path("testnet4") + self.assertEqual(path, os.path.join(self.tmpdir, "testnet4", "fee_analysis.db")) + self.assertTrue(os.path.isfile(path)) + + def test_networks_are_isolated(self): + """Data written to one network is invisible to another.""" + self.db.init_db("test") + self.db.save_estimate(800000, target=2, feerate=15.5, chain="main") + self.db.save_estimate(800000, target=2, feerate=99.0, chain="test") + + main_rows = self.db.get_estimates_in_range(800000, 800000, target=2, chain="main") + test_rows = self.db.get_estimates_in_range(800000, 800000, target=2, chain="test") + + self.assertEqual(len(main_rows), 1) + self.assertAlmostEqual(main_rows[0]['estimate_feerate'], 15.5) + self.assertEqual(len(test_rows), 1) + self.assertAlmostEqual(test_rows[0]['estimate_feerate'], 99.0) + + def 
test_all_networks_are_isolated(self): + """Each chain's DB is independent from every other chain.""" + chains = ["main", "test", "testnet4", "signet", "regtest"] + for c in chains: + self.db.init_db(c) + self.db.save_estimate(100, target=2, feerate=float(chains.index(c) + 1), chain=c) + + for c in chains: + rows = self.db.get_estimates_in_range(100, 100, target=2, chain=c) + self.assertEqual(len(rows), 1, msg=f"Expected 1 row for {c}") + self.assertAlmostEqual(rows[0]['estimate_feerate'], float(chains.index(c) + 1)) + # --- save_estimate / get_estimates_in_range ----------------------------- def test_save_and_retrieve(self): @@ -58,7 +116,8 @@ def test_save_and_retrieve(self): def test_expected_height_computed_correctly(self): self.db.save_estimate(poll_height=800000, target=7, feerate=10.0) - conn = sqlite3.connect(self.tmp.name) + db_path = self.db.get_db_path() + conn = sqlite3.connect(db_path) conn.row_factory = sqlite3.Row cursor = conn.cursor() cursor.execute('SELECT expected_height FROM fee_estimates WHERE poll_height=800000') diff --git a/backend/tests/test_rpc_service.py b/backend/tests/test_rpc_service.py index c38b202..234f019 100644 --- a/backend/tests/test_rpc_service.py +++ b/backend/tests/test_rpc_service.py @@ -1,51 +1,50 @@ -import importlib import json import unittest from unittest.mock import MagicMock, patch +from services.rpc_service import RpcClient, _clamp_target -class TestRpcService(unittest.TestCase): + +class TestClampTarget(unittest.TestCase): + + def test_below_2(self): + self.assertEqual(_clamp_target(1), 2) + self.assertEqual(_clamp_target(0), 2) + self.assertEqual(_clamp_target(-5), 2) + + def test_at_or_above_2(self): + self.assertEqual(_clamp_target(2), 2) + self.assertEqual(_clamp_target(7), 7) + self.assertEqual(_clamp_target(144), 144) + + +class TestRpcClient(unittest.TestCase): def setUp(self): - # Reload module each time so lru_cache and counters start fresh - import services.rpc_service as rpc - importlib.reload(rpc) - 
self.rpc = rpc + self.client = RpcClient("http://test:8332", "user", "pass") def _mock_post(self, result=None, error=None): mock_response = MagicMock() mock_response.json.return_value = {"result": result, "error": error, "id": 1} return MagicMock(return_value=mock_response) - # --- _clamp_target ------------------------------------------------------ - - def test_clamp_target_below_2(self): - self.assertEqual(self.rpc._clamp_target(1), 2) - self.assertEqual(self.rpc._clamp_target(0), 2) - self.assertEqual(self.rpc._clamp_target(-5), 2) - - def test_clamp_target_at_or_above_2(self): - self.assertEqual(self.rpc._clamp_target(2), 2) - self.assertEqual(self.rpc._clamp_target(7), 7) - self.assertEqual(self.rpc._clamp_target(144), 144) - - # --- _rpc_call ---------------------------------------------------------- + # --- rpc_call ----------------------------------------------------------- def test_rpc_call_success(self): - with patch.object(self.rpc._session, 'post', self._mock_post(result=42)): - self.assertEqual(self.rpc._rpc_call("getblockcount", []), 42) + with patch.object(self.client._session, 'post', self._mock_post(result=42)): + self.assertEqual(self.client.rpc_call("getblockcount", []), 42) def test_rpc_call_rpc_error_raises(self): - with patch.object(self.rpc._session, 'post', self._mock_post(error={"code": -1, "message": "bad"})): + with patch.object(self.client._session, 'post', self._mock_post(error={"code": -1, "message": "bad"})): with self.assertRaises(RuntimeError) as ctx: - self.rpc._rpc_call("getblockcount", []) + self.client.rpc_call("getblockcount", []) self.assertIn("RPC Error", str(ctx.exception)) def test_rpc_call_transport_error_does_not_leak_details(self): mock_post = MagicMock(side_effect=ConnectionError("refused")) - with patch.object(self.rpc._session, 'post', mock_post): + with patch.object(self.client._session, 'post', mock_post): with self.assertRaises(RuntimeError) as ctx: - self.rpc._rpc_call("getblockcount", []) + 
self.client.rpc_call("getblockcount", []) self.assertNotIn('refused', str(ctx.exception)) def test_rpc_call_uses_incrementing_ids(self): @@ -57,9 +56,9 @@ def capture(url, data, **kwargs): resp.json.return_value = {"result": 1, "error": None, "id": captured_ids[-1]} return resp - with patch.object(self.rpc._session, 'post', side_effect=capture): + with patch.object(self.client._session, 'post', side_effect=capture): for _ in range(3): - self.rpc._rpc_call("getblockcount", []) + self.client.rpc_call("getblockcount", []) self.assertEqual(len(set(captured_ids)), 3) self.assertEqual(captured_ids, sorted(captured_ids)) @@ -67,51 +66,103 @@ def capture(url, data, **kwargs): # --- estimate_smart_fee ------------------------------------------------- def test_adds_feerate_sat_per_vb(self): - with patch.object(self.rpc, '_rpc_call', return_value={"feerate": 0.0001, "blocks": 2}): - result = self.rpc.estimate_smart_fee(2, "unset", 2) + def mock_rpc(method, params): + if method == "estimatesmartfee": + return {"feerate": 0.0001, "blocks": 2} + if method == "getblockchaininfo": + return {"chain": "main", "blocks": 800000} + if method == "getblockcount": + return 800000 + if method == "getmempoolfeeratediagram": + return [{"fee": 0.001, "weight": 100000}] + if method == "getblockstats": + return {"height": params[0], "total_weight": 1000000} + return None + + with patch.object(self.client, 'rpc_call', side_effect=mock_rpc): + result = self.client.estimate_smart_fee(2, "unset", 2) self.assertAlmostEqual(result['feerate_sat_per_vb'], 0.0001 * 100_000) def test_feerate_conversion_is_correct(self): - # 1 BTC/kVB = 100_000 sat/vB - with patch.object(self.rpc, '_rpc_call', return_value={"feerate": 1.0, "blocks": 2}): - result = self.rpc.estimate_smart_fee(2, "unset", 2) + def mock_rpc(method, params): + if method == "estimatesmartfee": + return {"feerate": 1.0, "blocks": 2} + if method == "getblockchaininfo": + return {"chain": "main", "blocks": 800000} + if method == "getblockcount": 
+ return 800000 + if method == "getmempoolfeeratediagram": + return [{"fee": 0.001, "weight": 100000}] + if method == "getblockstats": + return {"height": params[0], "total_weight": 1000000} + return None + + with patch.object(self.client, 'rpc_call', side_effect=mock_rpc): + result = self.client.estimate_smart_fee(2, "unset", 2) self.assertAlmostEqual(result['feerate_sat_per_vb'], 100_000.0) def test_no_feerate_key_does_not_crash(self): - with patch.object(self.rpc, '_rpc_call', return_value={"blocks": 2}): - result = self.rpc.estimate_smart_fee(2, "unset", 2) + def mock_rpc(method, params): + if method == "estimatesmartfee": + return {"blocks": 2} + if method == "getblockchaininfo": + return {"chain": "main", "blocks": 800000} + if method == "getblockcount": + return 800000 + if method == "getmempoolfeeratediagram": + return [] + return None + + with patch.object(self.client, 'rpc_call', side_effect=mock_rpc): + result = self.client.estimate_smart_fee(2, "unset", 2) self.assertNotIn('feerate_sat_per_vb', result) def test_clamps_target_in_rpc_call(self): - with patch.object(self.rpc, '_rpc_call', return_value={"feerate": 0.0001}) as mock: - self.rpc.estimate_smart_fee(1, "unset", 2) - self.assertEqual(mock.call_args[0][1][0], 2) # params[0] should be 2 + def mock_rpc(method, params): + if method == "estimatesmartfee": + return {"feerate": 0.0001} + if method == "getblockchaininfo": + return {"chain": "main", "blocks": 800000} + if method == "getblockcount": + return 800000 + if method == "getmempoolfeeratediagram": + return [{"fee": 0.001, "weight": 100000}] + if method == "getblockstats": + return {"height": params[0], "total_weight": 1000000} + return None + + with patch.object(self.client, 'rpc_call', side_effect=mock_rpc) as mock: + self.client.estimate_smart_fee(1, "unset", 2) + esf_calls = [c for c in mock.call_args_list if c[0][0] == "estimatesmartfee"] + self.assertGreater(len(esf_calls), 0) + params = esf_calls[0][0][1] + self.assertEqual(params[0], 2) # 
--- get_single_block_stats cache safety -------------------------------- def test_mutation_does_not_corrupt_cache(self): stats = {"height": 800000, "feerate_percentiles": [1, 2, 3, 4, 5]} - with patch.object(self.rpc, '_rpc_call', return_value=stats): - result1 = self.rpc.get_single_block_stats(800000) + with patch.object(self.client, 'rpc_call', return_value=stats): + result1 = self.client.get_single_block_stats(800000) result1['mutated'] = True - with patch.object(self.rpc, '_rpc_call', return_value=stats): - result2 = self.rpc.get_single_block_stats(800000) + with patch.object(self.client, 'rpc_call', return_value=stats): + result2 = self.client.get_single_block_stats(800000) self.assertNotIn('mutated', result2) def test_second_call_hits_cache(self): stats = {"height": 800000, "feerate_percentiles": [1, 2, 3, 4, 5]} - with patch.object(self.rpc, '_rpc_call', return_value=stats) as mock: - self.rpc.get_single_block_stats(800000) - self.rpc.get_single_block_stats(800000) + with patch.object(self.client, 'rpc_call', return_value=stats) as mock: + self.client.get_single_block_stats(800000) + self.client.get_single_block_stats(800000) mock.assert_called_once() # --- get_mempool_feerate_diagram_analysis -------------------------------- def test_empty_raw_returns_defaults(self): - with patch.object(self.rpc, '_rpc_call', return_value=None): - result = self.rpc.get_mempool_feerate_diagram_analysis() + with patch.object(self.client, 'rpc_call', return_value=None): + result = self.client.get_mempool_feerate_diagram_analysis() self.assertEqual(result, {"raw": [], "windows": {}}) def test_diagram_output_structure(self): @@ -120,8 +171,8 @@ def test_diagram_output_structure(self): {"weight": 2_000_000, "fee": 0.002}, {"weight": 4_000_000, "fee": 0.004}, ] - with patch.object(self.rpc, '_rpc_call', return_value=raw_points): - result = self.rpc.get_mempool_feerate_diagram_analysis() + with patch.object(self.client, 'rpc_call', return_value=raw_points): + result = 
self.client.get_mempool_feerate_diagram_analysis() self.assertEqual(result['total_weight'], 4_000_000) self.assertEqual(result['total_fee'], 0.004) @@ -136,13 +187,111 @@ def test_diagram_feerates_non_negative(self): {"weight": 500_000, "fee": 0.0005}, {"weight": 4_000_000, "fee": 0.004}, ] - with patch.object(self.rpc, '_rpc_call', return_value=raw_points): - result = self.rpc.get_mempool_feerate_diagram_analysis() + with patch.object(self.client, 'rpc_call', return_value=raw_points): + result = self.client.get_mempool_feerate_diagram_analysis() for window in result['windows'].values(): for fr in window.values(): self.assertGreaterEqual(fr, 0) +class TestChainDetection(unittest.TestCase): + """RpcClient.chain auto-detects the network from getblockchaininfo.""" + + def test_detects_all_supported_networks(self): + cases = [ + ("main", "MAINNET"), + ("test", "TESTNET"), + ("testnet4", "TESTNET4"), + ("signet", "SIGNET"), + ("regtest", "REGTEST"), + ] + for chain, display in cases: + with self.subTest(chain=chain): + client = RpcClient("http://test:8332", "u", "p") + with patch.object(client, 'rpc_call', return_value={"chain": chain, "blocks": 100}): + self.assertEqual(client.chain, chain) + self.assertEqual(client.chain_display, display) + + def test_chain_is_cached_after_first_access(self): + client = RpcClient("http://test:8332", "u", "p") + with patch.object(client, 'rpc_call', return_value={"chain": "signet", "blocks": 0}) as mock: + _ = client.chain + _ = client.chain + rpc_calls = [c for c in mock.call_args_list if c[0][0] == "getblockchaininfo"] + self.assertEqual(len(rpc_calls), 1) + + def test_estimate_smart_fee_attaches_chain(self): + client = RpcClient("http://test:8332", "u", "p") + + def mock_rpc(method, params): + if method == "estimatesmartfee": + return {"feerate": 0.0001, "blocks": 2} + if method == "getblockchaininfo": + return {"chain": "testnet4", "blocks": 100} + if method == "getblockcount": + return 100 + if method == 
"getmempoolfeeratediagram": + return [] + return None + + with patch.object(client, 'rpc_call', side_effect=mock_rpc): + result = client.estimate_smart_fee(2) + self.assertEqual(result['chain'], 'testnet4') + self.assertEqual(result['chain_display'], 'TESTNET4') + + +class TestRpcRegistry(unittest.TestCase): + + def test_add_and_get_client(self): + from services.rpc_service import RpcRegistry + reg = RpcRegistry() + client = RpcClient("http://test:8332", "u", "p") + client._chain = "signet" + reg.add_client("signet", client) + self.assertIs(reg.get_client("signet"), client) + + def test_default_chain_is_first_added(self): + from services.rpc_service import RpcRegistry + reg = RpcRegistry() + for name in ("regtest", "signet"): + c = RpcClient("http://test:8332", "u", "p") + c._chain = name + reg.add_client(name, c) + self.assertEqual(reg.default_chain, "regtest") + + def test_get_unknown_chain_raises(self): + from services.rpc_service import RpcRegistry + reg = RpcRegistry() + c = RpcClient("http://test:8332", "u", "p") + c._chain = "main" + reg.add_client("main", c) + with self.assertRaises(ValueError): + reg.get_client("fakenet") + + def test_available_chains_includes_display(self): + from services.rpc_service import RpcRegistry + reg = RpcRegistry() + for name in ("signet", "testnet4", "regtest"): + c = RpcClient("http://test:8332", "u", "p") + c._chain = name + reg.add_client(name, c) + chains = reg.available_chains() + chain_names = [c['chain'] for c in chains] + self.assertEqual(chain_names, ["signet", "testnet4", "regtest"]) + displays = [c['chain_display'] for c in chains] + self.assertEqual(displays, ["SIGNET", "TESTNET4", "REGTEST"]) + + def test_contains_and_len(self): + from services.rpc_service import RpcRegistry + reg = RpcRegistry() + c = RpcClient("http://test:8332", "u", "p") + c._chain = "main" + reg.add_client("main", c) + self.assertIn("main", reg) + self.assertNotIn("signet", reg) + self.assertEqual(len(reg), 1) + + if __name__ == '__main__': 
unittest.main() diff --git a/frontend/src/app/api/[...path]/route.ts b/frontend/src/app/api/[...path]/route.ts index f13cb05..bc59c2d 100644 --- a/frontend/src/app/api/[...path]/route.ts +++ b/frontend/src/app/api/[...path]/route.ts @@ -10,6 +10,7 @@ const ALLOWED_PATH_ROOTS = new Set([ "fees", "performance-data", "fees-sum", + "networks", ]); export async function GET( diff --git a/frontend/src/app/layout.tsx b/frontend/src/app/layout.tsx index dd6f35a..8434431 100644 --- a/frontend/src/app/layout.tsx +++ b/frontend/src/app/layout.tsx @@ -1,6 +1,7 @@ import type { Metadata } from "next"; import { Geist, Geist_Mono } from "next/font/google"; import "./globals.css"; +import { NetworkProvider } from "../context/NetworkContext"; const geistSans = Geist({ variable: "--font-geist-sans", @@ -36,7 +37,9 @@ export default function RootLayout({ className={`${geistSans.variable} ${geistMono.variable} antialiased`} suppressHydrationWarning > - {children} + + {children} + ); diff --git a/frontend/src/app/mempool/page.tsx b/frontend/src/app/mempool/page.tsx index 8e9c499..9aa7b7c 100644 --- a/frontend/src/app/mempool/page.tsx +++ b/frontend/src/app/mempool/page.tsx @@ -1,35 +1,37 @@ "use client"; -import { useState, useEffect } from "react"; +import { useState, useEffect, useCallback } from "react"; import { api, MempoolDiagramResponse } from "../../services/api"; import { Header } from "../../components/common/Header"; +import { useNetwork } from "../../context/NetworkContext"; import MempoolDiagramChart from "../../components/mempool/MempoolDiagramChart"; import { Activity, Database, AlertCircle, RefreshCw, Layers, TrendingUp, Scale, Database as DbIcon } from "lucide-react"; export default function MempoolPage() { + const { chain } = useNetwork(); const [data, setData] = useState(null); const [loading, setLoading] = useState(true); const [error, setError] = useState(null); const [blocksToShow, setBlocksToShow] = useState(1); - const fetchData = async () => { + const fetchData 
= useCallback(async () => { try { setLoading(true); setError(null); - const result = await api.getMempoolDiagram(); + const result = await api.getMempoolDiagram(chain); setData(result); } catch (err) { setError(err instanceof Error ? err.message : "Failed to fetch mempool diagram"); } finally { setLoading(false); } - }; + }, [chain]); useEffect(() => { fetchData(); const interval = setInterval(fetchData, 30000); return () => clearInterval(interval); - }, []); + }, [fetchData]); const rawData = data?.raw || []; const currentWindowKey = blocksToShow.toString(); diff --git a/frontend/src/app/page.tsx b/frontend/src/app/page.tsx index cc7826b..744ea43 100644 --- a/frontend/src/app/page.tsx +++ b/frontend/src/app/page.tsx @@ -5,10 +5,12 @@ import { api } from "../services/api"; import { AlertCircle, BarChart2, Activity, Loader2, ChevronLeft, ChevronRight } from "lucide-react"; import { FeeEstimateResponse, MempoolHealthStats } from "../types/api"; import { Header } from "../components/common/Header"; +import { useNetwork } from "../context/NetworkContext"; type FeeMode = "economical" | "conservative"; export default function LandingPage() { + const { chain } = useNetwork(); const [target, setTarget] = useState(2); const [mode, setMode] = useState("economical"); const [feeData, setFeeData] = useState(null); @@ -23,8 +25,7 @@ export default function LandingPage() { else setIsUpdating(true); setError(null); - // Backend automatically maps target <= 1 to 2 - const data = await api.getFeeEstimate(confTarget, feeMode, 2); + const data = await api.getFeeEstimate(confTarget, feeMode, 2, chain); setFeeData(data); } catch (err) { const msg = err instanceof Error ? err.message : "Failed to fetch fee data"; @@ -33,7 +34,7 @@ export default function LandingPage() { setInitialLoading(false); setIsUpdating(false); } - }, []); + }, [chain]); useEffect(() => { fetchFee(target, mode, true); @@ -60,11 +61,6 @@ export default function LandingPage() {
-
-
- NETWORK: MAINNET -
-
{[2, 7, 144].map((t) => ( + + {open && hasMultiple && ( +
+ {networks.map((n) => { + const s = getStyle(n.chain); + const isActive = n.chain === chain; + const name = n.chain_display.charAt(0) + n.chain_display.slice(1).toLowerCase(); + return ( + + ); + })} +
+ )} +
+ ); +} diff --git a/frontend/src/context/NetworkContext.tsx b/frontend/src/context/NetworkContext.tsx new file mode 100644 index 0000000..c59a042 --- /dev/null +++ b/frontend/src/context/NetworkContext.tsx @@ -0,0 +1,51 @@ +"use client"; + +import { createContext, useContext, useState, useEffect, useCallback, ReactNode } from "react"; +import { api } from "../services/api"; +import { NetworkInfo } from "../types/api"; + +interface NetworkContextValue { + chain: string | undefined; + chainDisplay: string; + networks: NetworkInfo[]; + setChain: (chain: string) => void; + loading: boolean; +} + +const NetworkContext = createContext({ + chain: undefined, + chainDisplay: "MAINNET", + networks: [], + setChain: () => {}, + loading: true, +}); + +export function NetworkProvider({ children }: { children: ReactNode }) { + const [networks, setNetworks] = useState([]); + const [chain, setChain] = useState(undefined); + const [loading, setLoading] = useState(true); + + useEffect(() => { + api.getNetworks() + .then((nets) => { + setNetworks(nets); + if (nets.length > 0 && !chain) { + setChain(nets[0].chain); + } + }) + .catch(() => {}) + .finally(() => setLoading(false)); + }, []); + + const chainDisplay = networks.find((n) => n.chain === chain)?.chain_display ?? 
"MAINNET"; + + return ( + + {children} + + ); +} + +export function useNetwork() { + return useContext(NetworkContext); +} diff --git a/frontend/src/hooks/useStats.ts b/frontend/src/hooks/useStats.ts index bc46dcc..53ba047 100644 --- a/frontend/src/hooks/useStats.ts +++ b/frontend/src/hooks/useStats.ts @@ -2,7 +2,7 @@ import { useState, useEffect, useCallback } from "react"; import { api } from "../services/api"; import { AnalyticsSummary, MempoolHealthStats } from "../types/api"; -export function useStats(target: number = 2) { +export function useStats(target: number = 2, chain?: string) { const [performanceData, setPerformanceData] = useState<{ blocks: any[]; estimates: any[] }>({ blocks: [], estimates: [] }); const [summary, setSummary] = useState(null); const [healthStats, setHealthStats] = useState([]); @@ -21,9 +21,9 @@ export function useStats(target: number = 2) { const count = Math.max(1, end - start); const [pData, fSum, feeEst] = await Promise.all([ - api.getPerformanceData(start, count, confTarget), - api.getFeesSum(start, confTarget), - api.getFeeEstimate(confTarget, "unset", 2) + api.getPerformanceData(start, count, confTarget, chain), + api.getFeesSum(start, confTarget, chain), + api.getFeeEstimate(confTarget, "unset", 2, chain) ]); setPerformanceData(pData); @@ -35,31 +35,39 @@ export function useStats(target: number = 2) { } finally { setLoading(false); } - }, []); + }, [chain]); const syncHeight = useCallback(async () => { try { - const { blockcount } = await api.getBlockCount(); + const { blockcount } = await api.getBlockCount(chain); setLatestBlock(blockcount); return blockcount; } catch (err) { return null; } - }, []); + }, [chain]); + // Reset default range and refetch whenever the chain changes (not only on first mount). + // If we only initialized when startBlock === null, switching networks would keep the old + // heights and never resync for the new chain. 
useEffect(() => { + if (!chain) return; + let cancelled = false; const init = async () => { const currentHeight = await syncHeight(); - if (currentHeight && startBlock === null) { - const s = currentHeight - 100; // Default to 100 for clarity - const e = currentHeight; - setStartBlock(s); - setEndBlock(e); - fetchData(s, e, target); - } + if (cancelled || !currentHeight) return; + const s = currentHeight - 100; + const e = currentHeight; + setStartBlock(s); + setEndBlock(e); + fetchData(s, e, target); }; init(); - }, [syncHeight]); + return () => { + cancelled = true; + }; + // eslint-disable-next-line react-hooks/exhaustive-deps -- target is applied via SYNC; only chain should reset range + }, [chain, syncHeight, fetchData]); const handleApply = () => { if (startBlock !== null && endBlock !== null) { diff --git a/frontend/src/services/api.ts b/frontend/src/services/api.ts index 0501476..21b5e88 100644 --- a/frontend/src/services/api.ts +++ b/frontend/src/services/api.ts @@ -4,6 +4,7 @@ import { FeesStatsMap, BlockchainInfo, FeeEstimateResponse, + NetworkInfo, } from "../types/api"; const API_BASE_PATH = "/api"; @@ -29,9 +30,8 @@ export class BitcoinCoreAPI { private async fetchJson(path: string, options?: RequestInit): Promise { const cleanPath = path.replace(/^\/+/, "").replace(/\/+$/, ""); - const url = this.baseUrl.startsWith("http") - ? `${this.baseUrl.replace(/\/+$/, "")}/${cleanPath}` - : `${this.baseUrl}/${cleanPath}`; + const base = this.baseUrl.replace(/\/+$/, ""); + const url = `${base}/${cleanPath}`; try { const response = await fetch(url, options); if (!response.ok) { @@ -45,24 +45,39 @@ export class BitcoinCoreAPI { } } - async getFeeEstimate(target: number = 2, mode: string = "economical", level: number = 2): Promise { - return this.fetchJson(`fees/${target}/${mode}/${level}`); + private chainParam(chain?: string, existingParams?: string): string { + if (!chain) return existingParams ? `?${existingParams}` : ""; + const sep = existingParams ? 
"&" : ""; + return `?${existingParams || ""}${sep}chain=${chain}`; } - async getBlockCount(): Promise { - return this.fetchJson(`blockcount`); + async getNetworks(): Promise { + return this.fetchJson("networks"); } - async getPerformanceData(startBlock: number, count: number = 100, target: number = 2): Promise { - return this.fetchJson(`performance-data/${startBlock}/?target=${target}&count=${count}`); + async getFeeEstimate(target: number = 2, mode: string = "economical", level: number = 2, chain?: string): Promise { + const q = chain ? `?chain=${chain}` : ""; + return this.fetchJson(`fees/${target}/${mode}/${level}${q}`); } - async getFeesSum(startBlock: number, target: number = 2): Promise { - return this.fetchJson(`fees-sum/${startBlock}?target=${target}`); + async getBlockCount(chain?: string): Promise { + const q = chain ? `?chain=${chain}` : ""; + return this.fetchJson(`blockcount${q}`); } - async getMempoolDiagram(): Promise { - return this.fetchJson(`mempool-diagram`); + async getPerformanceData(startBlock: number, count: number = 100, target: number = 2, chain?: string): Promise { + const params = `target=${target}&count=${count}${chain ? `&chain=${chain}` : ""}`; + return this.fetchJson(`performance-data/${startBlock}/?${params}`); + } + + async getFeesSum(startBlock: number, target: number = 2, chain?: string): Promise { + const params = `target=${target}${chain ? `&chain=${chain}` : ""}`; + return this.fetchJson(`fees-sum/${startBlock}?${params}`); + } + + async getMempoolDiagram(chain?: string): Promise { + const q = chain ? 
`?chain=${chain}` : ""; + return this.fetchJson(`mempool-diagram${q}`); } } diff --git a/frontend/src/types/api.ts index e204d74..e739b81 100644 --- a/frontend/src/types/api.ts +++ b/frontend/src/types/api.ts @@ -21,6 +21,8 @@ export type FeesStatsMap = Record; export interface BlockchainInfo { blockcount: number; + chain?: string; // "main" | "test" | "testnet4" | "signet" | "regtest" + chain_display?: string; // "MAINNET" | "TESTNET" | "TESTNET4" | "SIGNET" | "REGTEST" } export interface MempoolHealthStats { @@ -34,5 +36,12 @@ export interface FeeEstimateResponse { feerate: number; blocks: number; errors?: string[]; + chain?: string; + chain_display?: string; mempool_health_statistics?: MempoolHealthStats[]; } + +export interface NetworkInfo { + chain: string; + chain_display: string; +}