Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion Readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ This project tracks `estimatesmartfee` from a Bitcoin Core node and compares tho
- **Historical Accuracy**: Visualizes the accuracy of estimates (within range, overpaid, or underpaid) compared to real block data.
- **Mempool Diagram**: Real-time visualization of the mempool fee/weight accumulation curve.
- **Block Statistics**: Direct insights into feerate percentiles for recent blocks.
- **Multi-Network Support**: Connect to multiple Bitcoin Core nodes simultaneously (mainnet, testnet, signet, regtest). Each network gets its own collector thread and per-network database. Switch between networks from the UI without restarting.

#### Architecture

Expand Down Expand Up @@ -39,7 +40,7 @@ This project tracks `estimatesmartfee` from a Bitcoin Core node and compares tho
- **Node.js**: 22+

#### 1. Configuration
- **Backend**: Copy `backend/rpc_config.ini.example` to `backend/rpc_config.ini` and provide RPC credentials.
- **Backend**: Copy `backend/rpc_config.ini.example` to `backend/rpc_config.ini` and add one `[RPC.<chain>]` section per node you want to connect to. Each section needs `URL`, `RPC_USER`, and `RPC_PASSWORD`. The chain is auto-detected via `getblockchaininfo`. Estimates are stored in per-network databases (`fee_analysis.db`, `testnet3/fee_analysis.db`, etc.).

#### 2. Manual Startup
**Backend:**
Expand Down
38 changes: 34 additions & 4 deletions backend/rpc_config.ini.example
Original file line number Diff line number Diff line change
@@ -1,4 +1,34 @@
[RPC_INFO]
URL =
RPC_USER =
RPC_PASSWORD =
# Multi-network RPC configuration.
# Each [RPC.<chain>] section defines a connection to a Bitcoin Core node.
# The app validates each section by calling getblockchaininfo on startup.
#
# Supported chains: main, testnet, testnet4, signet, regtest
# Only include sections for nodes you actually run.
#
# Cookie auth (recommended): set RPC_USER=__cookie__ and RPC_PASSWORD to the
# contents of the node's .cookie file for that chain's datadir.

[RPC.main]
URL = http://127.0.0.1:8332
RPC_USER =
RPC_PASSWORD =

# [RPC.testnet]
# URL = http://127.0.0.1:18332
# RPC_USER =
# RPC_PASSWORD =

# [RPC.testnet4]
# URL = http://127.0.0.1:48332
# RPC_USER =
# RPC_PASSWORD =

# [RPC.signet]
# URL = http://127.0.0.1:38332
# RPC_USER =
# RPC_PASSWORD =

# [RPC.regtest]
# URL = http://127.0.0.1:18443
# RPC_USER =
# RPC_PASSWORD =
76 changes: 49 additions & 27 deletions backend/src/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,66 +17,79 @@

def create_app():
app = Flask(__name__)
# NOTE: Configure x_for=1 to match your actual proxy depth.
# Without this, X-Forwarded-For spoofing can defeat IP-based limiting.
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1)
CORS(app)

# ---------------------------------------------------------------------------
# Rate limiting
# ---------------------------------------------------------------------------
# Uses the real client IP (respects ProxyFix above).
# Default: 10000 requests/day, 1000/hour applied to every endpoint unless
# overridden with a per-route @limiter.limit() decorator below.
# ---------------------------------------------------------------------------
limiter = Limiter(
key_func=get_remote_address,
app=app,
default_limits=["10000 per day", "1000 per hour"],
# Store state in memory by default. For multi-worker/multi-process
# deployments swap this for a Redis URI:
# storage_uri="redis://localhost:6379"
storage_uri="memory://",
# Return 429 JSON instead of HTML when limit is hit
headers_enabled=True, # adds X-RateLimit-* headers to responses
headers_enabled=True,
)

db_service.init_db()
collector_service.start_background_collector()
# Initialise a DB for each configured chain
for chain in rpc_service.registry.chains():
db_service.init_db(chain=chain)

collector_service.start_background_collectors()

# ---------------------------------------------------------------------------
# Helper — resolve ?chain= query param (defaults to first registered chain)
# ---------------------------------------------------------------------------
def _resolve_chain():
    """Read the optional ?chain= query parameter.

    Returns (chain, None) on success — chain may be None, which callers treat
    as "use the default/first registered chain" — or (None, error_response)
    when the requested chain is not registered.
    """
    requested = request.args.get("chain")
    if not requested or requested in rpc_service.registry:
        return requested, None
    body = jsonify({"error": f"Unknown chain '{requested}'. Available: {rpc_service.registry.chains()}"})
    return None, (body, 400)

# ---------------------------------------------------------------------------
# Routes
# ---------------------------------------------------------------------------

@app.route("/networks", methods=['GET'])
@limiter.limit("100 per minute")
def networks():
return jsonify(rpc_service.get_available_chains())

@app.route("/fees/<int:target>/<string:mode>/<int:level>", methods=['GET'])
@limiter.limit("50 per minute") # estimatesmartfee is a node RPC call — keep it tight
@limiter.limit("50 per minute")
def fees(target, mode, level):
VALID_MODES = {"economical", "conservative", "unset"}
if mode not in VALID_MODES:
return jsonify({"error": f"Invalid mode '{mode}'. Must be one of: {', '.join(VALID_MODES)}"}), 400
chain, err = _resolve_chain()
if err:
return err
try:
result = rpc_service.estimate_smart_fee(conf_target=target, mode=mode, verbosity_level=level)
result = rpc_service.estimate_smart_fee(conf_target=target, mode=mode, verbosity_level=level, chain=chain)
return jsonify(result)
except Exception as e:
logger.error(f"/fees RPC failed: {e}", exc_info=True)
return jsonify({"error": "Internal server error"}), 500

@app.route("/mempool-diagram", methods=['GET'])
@limiter.limit("50 per minute") # expensive computation — strict cap
@limiter.limit("50 per minute")
def mempool_diagram():
chain, err = _resolve_chain()
if err:
return err
try:
result = rpc_service.get_mempool_feerate_diagram_analysis()
result = rpc_service.get_mempool_feerate_diagram_analysis(chain=chain)
return jsonify(result)
except Exception as e:
logger.error(f"Mempool diagram RPC failed: {e}", exc_info=True)
return jsonify({"error": "Internal server error"}), 500

@app.route("/performance-data/<int:start_block>/", methods=['GET'])
@limiter.limit("50 per minute") # hits DB + RPC
@limiter.limit("50 per minute")
def get_performance_data(start_block):
target = request.args.get('target', default=2, type=int)
chain, err = _resolve_chain()
if err:
return err
try:
data = rpc_service.get_performance_data(start_height=start_block, count=100, target=target)
data = rpc_service.get_performance_data(start_height=start_block, count=100, target=target, chain=chain)
return jsonify(data)
except Exception as e:
logger.error(f"/performance-data RPC failed: {e}", exc_info=True)
Expand All @@ -86,19 +99,29 @@ def get_performance_data(start_block):
@limiter.limit("50 per minute")
def get_local_fees_sum(start_block):
target = request.args.get('target', default=2, type=int)
chain, err = _resolve_chain()
if err:
return err
try:
data = rpc_service.calculate_local_summary(target=target)
data = rpc_service.calculate_local_summary(target=target, chain=chain)
return jsonify(data)
except Exception as e:
logger.error(f"/fees-sum failed: {e}", exc_info=True)
return jsonify({"error": "Internal server error"}), 500

@app.route("/blockcount", methods=['GET'])
@limiter.limit("100 per minute") # cheap call, slightly more relaxed
@limiter.limit("100 per minute")
def block_count():
chain, err = _resolve_chain()
if err:
return err
try:
result = rpc_service.get_block_count()
return jsonify({"blockcount": result})
info = rpc_service.get_blockchain_info(chain=chain)
return jsonify({
"blockcount": info["blockcount"],
"chain": info["chain"],
"chain_display": info["chain_display"],
})
except Exception as e:
logger.error(f"/blockcount RPC failed: {e}", exc_info=True)
return jsonify({"error": "Internal server error"}), 500
Expand All @@ -113,7 +136,6 @@ def page_not_found(error):

@app.errorhandler(429)
def rate_limit_exceeded(error):
# error.description is the limit string e.g. "30 per 1 minute"
logger.warning(f"Rate limit exceeded from {get_remote_address()}: {error.description}")
return jsonify({
"error": "Too many requests",
Expand Down
55 changes: 32 additions & 23 deletions backend/src/services/collector_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,44 +5,53 @@
import services.database_service as db_service

logger = logging.getLogger("collector")
_collector_started = False
_collectors_started = False

def run_collector():
logger.info("Starting high-resolution fee estimate collector (7s interval)...")
# 1 and 2 are the same, so we only poll 2

def _run_collector_for_chain(chain: str):
    """Endless polling loop for one chain; intended to run in a daemon thread.

    Every ~7 seconds: read the current height, then request estimatesmartfee
    for each confirmation target and persist any returned feerate to the
    chain's database. Failures are logged and never break the loop.
    """
    client = rpc_service.get_client(chain)
    display = rpc_service.CHAIN_DISPLAY_NAMES.get(chain, chain.upper())
    logger.info(f"[Collector:{display}] Starting (7s interval)...")
    targets = [2, 7, 144]

    while True:
        started = time.time()
        try:
            height = client.get_block_count()
            for target in targets:
                try:
                    estimate = client.estimate_smart_fee(target, "unset", 1)
                    if "feerate_sat_per_vb" in estimate:
                        feerate = estimate["feerate_sat_per_vb"]
                        db_service.save_estimate(height, target, feerate, chain=chain)
                        logger.info(
                            f"[Collector:{display}] SAVED: target={target} height={height} rate={feerate:.2f} sat/vB"
                        )
                except Exception as e:
                    # One bad target must not abort the other targets.
                    logger.error(f"[Collector:{display}] Failed for target {target}: {e}")
        except Exception as e:
            logger.error(f"[Collector:{display}] Loop error: {e}")

        # Keep a steady 7-second cadence regardless of how long the poll took.
        elapsed = time.time() - started
        time.sleep(max(0, 7 - elapsed))

def start_background_collector():
global _collector_started
if _collector_started:
logger.warning("Collector already running, skipping.")

def start_background_collectors():
    """Spawn one daemon collector thread per registered chain.

    Returns the list of started threads so callers can inspect/join them.
    (The legacy single-collector API returned its thread handle; the alias
    below would otherwise hand those callers None.) Returns None when the
    collectors are already running.
    """
    global _collectors_started
    if _collectors_started:
        logger.warning("Collectors already running, skipping.")
        return
    _collectors_started = True

    threads = []
    for chain in rpc_service.registry.chains():
        thread = threading.Thread(target=_run_collector_for_chain, args=(chain,), daemon=True)
        thread.start()
        logger.info(f"Collector thread started for {chain}")
        threads.append(thread)
    return threads


# Keep old name as alias for backward compat (tests, existing callers)
start_background_collector = start_background_collectors
53 changes: 37 additions & 16 deletions backend/src/services/database_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,37 @@

logger = logging.getLogger(__name__)

DB_PATH = os.environ.get(
"DB_PATH",
os.path.join(os.path.dirname(os.path.abspath(__file__)), "fee_analysis.db")
_BASE_DB_DIR = os.environ.get(
"DB_DIR",
os.path.dirname(os.path.abspath(__file__))
)

DB_FILENAME = "fee_analysis.db"

MAX_RANGE_BLOCKS = 10_000 # safety cap on get_estimates_in_range

def init_db():
# Bitcoin Core–style subdirectories per network
CHAIN_DIR_MAP = {
"main": "",
"test": "testnet3",
"testnet4": "testnet4",
"signet": "signet",
"regtest": "regtest",
}


def get_db_path(chain: str = "main") -> str:
"""Return the DB file path for the given chain, creating parent dirs if needed."""
subdir = CHAIN_DIR_MAP.get(chain, chain)
directory = os.path.join(_BASE_DB_DIR, subdir) if subdir else _BASE_DB_DIR
os.makedirs(directory, exist_ok=True)
return os.path.join(directory, DB_FILENAME)


def init_db(chain: str = "main"):
db_path = get_db_path(chain)
try:
with sqlite3.connect(DB_PATH) as conn:
with sqlite3.connect(db_path) as conn:
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS fee_estimates (
Expand All @@ -27,44 +48,44 @@ def init_db():
''')
cursor.execute('CREATE INDEX IF NOT EXISTS idx_poll_height ON fee_estimates(poll_height)')
cursor.execute('CREATE INDEX IF NOT EXISTS idx_target ON fee_estimates(target)')
# Composite index for the most common query pattern (poll_height + target together)
cursor.execute('''
CREATE INDEX IF NOT EXISTS idx_poll_height_target
ON fee_estimates(poll_height, target)
''')
conn.commit()
logger.info(f"Database initialised at {DB_PATH}")
logger.info(f"Database initialised at {db_path}")
except sqlite3.Error as e:
logger.error(f"Failed to initialise database: {e}", exc_info=True)
raise


def save_estimate(poll_height, target, feerate):
def save_estimate(poll_height, target, feerate, chain="main"):
    """Insert one fee-estimate row into the given chain's database.

    expected_height is the height by which the estimate should have been
    confirmed (poll height + confirmation target). Re-raises sqlite errors
    after logging so the caller can react.
    """
    expected_height = poll_height + target
    db_path = get_db_path(chain)
    try:
        with sqlite3.connect(db_path) as conn:
            conn.execute('''
                INSERT INTO fee_estimates (poll_height, target, estimate_feerate, expected_height)
                VALUES (?, ?, ?, ?)
            ''', (poll_height, target, feerate, expected_height))
            conn.commit()
        logger.debug(f"Saved estimate: poll_height={poll_height}, target={target}, feerate={feerate}, chain={chain}")
    except sqlite3.Error as e:
        logger.error(f"Failed to save estimate (poll_height={poll_height}, target={target}): {e}", exc_info=True)
        raise


def get_estimates_in_range(start_height, end_height, target=2):
# Enforce a max block range to prevent runaway queries
def get_estimates_in_range(start_height, end_height, target=2, chain="main"):
if end_height - start_height > MAX_RANGE_BLOCKS:
logger.warning(
f"Requested range [{start_height}, {end_height}] exceeds MAX_RANGE_BLOCKS={MAX_RANGE_BLOCKS}. Clamping."
)
end_height = start_height + MAX_RANGE_BLOCKS

db_path = get_db_path(chain)
try:
with sqlite3.connect(DB_PATH) as conn:
with sqlite3.connect(db_path) as conn:
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute('''
Expand All @@ -84,9 +105,10 @@ def get_estimates_in_range(start_height, end_height, target=2):
raise


def get_db_height_range(target=2):
def get_db_height_range(target=2, chain="main"):
db_path = get_db_path(chain)
try:
with sqlite3.connect(DB_PATH) as conn:
with sqlite3.connect(db_path) as conn:
cursor = conn.cursor()
cursor.execute(
'SELECT MIN(poll_height), MAX(poll_height) FROM fee_estimates WHERE target = ?',
Expand All @@ -97,7 +119,6 @@ def get_db_height_range(target=2):
if row and row[0] is None:
logger.debug(f"No data in DB for target={target}")

# Return raw tuple — preserves existing caller contract
return row
except sqlite3.Error as e:
logger.error(f"Failed to get DB height range: {e}", exc_info=True)
Expand Down
Loading