3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,8 @@
# Release Notes

## [2.0.2] - 2025-11-30
- **Validator**: Implement burn mechanism - UID 239 receives 80% of emissions, remaining miners share 20%

## [2.0.1] - 2025-11-28
- **Validator**: Added validator synchronization hour configuration

10 changes: 9 additions & 1 deletion neurons/validator/db/operations.py
@@ -858,7 +858,13 @@ async def get_events_for_metagraph_scoring(self, max_events: int = 1000) -> list
return events

async def set_metagraph_scores(
self, event_id: str, n_events: int, winner_weight: float, decay_power: float
self,
event_id: str,
n_events: int,
burn_weight: float,
winner_weight: float,
decay_power: float,
burn_uid: int,
) -> list:
"""
Calculate the moving average of metagraph scores for a given event
@@ -869,8 +875,10 @@
parameters={
"event_id": event_id,
"n_events": n_events,
"burn_weight": burn_weight,
"winner_weight": winner_weight,
"decay_power": decay_power,
"burn_uid": burn_uid,
},
)

9 changes: 5 additions & 4 deletions neurons/validator/db/sql/metagraph_score.sql
@@ -52,17 +52,18 @@ power_rank_sum AS (
-- Calculate sum of power-adjusted ranks for non-winners (for normalization)
SELECT SUM(POWER(rank, -:decay_power)) as total_power_rank
FROM ranked_miners
WHERE rank > 1
WHERE miner_uid != :burn_uid AND rank > 1
),
payload AS (
-- Assign metagraph score: winner_weight to rank 1, remainder distributed by power rank
-- Assign metagraph score: burn_weight to burn UID, winner_weight to rank 1, remainder distributed by power rank
SELECT
rm.miner_uid,
rm.miner_hotkey,
CASE
WHEN rm.rank = 1 THEN :winner_weight
WHEN rm.miner_uid = :burn_uid THEN :burn_weight
WHEN rm.rank = 1 THEN (1.0 - :burn_weight) * :winner_weight
ELSE (
(1.0 - :winner_weight)
(1.0 - :burn_weight) * (1.0 - :winner_weight)
* POWER(rm.rank, -:decay_power)
/ (SELECT total_power_rank FROM power_rank_sum)
)
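For reference, here is a minimal Python sketch of the payload logic above, under the same constants: the burn UID takes `burn_weight` outright, the best-ranked miner takes `winner_weight` of what remains, and the other non-burn miners split the rest by inverse-power rank. The function name and plain-dict input are illustrative only (ties are ignored); the authoritative implementation is the SQL in this file.

```python
def metagraph_scores(
    avg_brier_by_uid: dict[int, float],
    burn_uid: int,
    burn_weight: float = 0.80,
    winner_weight: float = 0.99,
    decay_power: float = 1.0,
) -> dict[int, float]:
    """Sketch of the payload CASE in metagraph_score.sql (illustrative names)."""
    # Rank miners by average Brier score, lowest (best) first.
    ranked = sorted(avg_brier_by_uid, key=avg_brier_by_uid.get)
    rank = {uid: i + 1 for i, uid in enumerate(ranked)}

    # Normalizer over non-winner, non-burn miners: SUM(POWER(rank, -decay_power)).
    total_power_rank = sum(
        r ** -decay_power for uid, r in rank.items() if uid != burn_uid and r > 1
    )

    scores = {}
    for uid, r in rank.items():
        if uid == burn_uid:
            scores[uid] = burn_weight
        elif r == 1:
            scores[uid] = (1.0 - burn_weight) * winner_weight
        else:
            scores[uid] = (
                (1.0 - burn_weight)
                * (1.0 - winner_weight)
                * r ** -decay_power
                / total_power_rank
            )
    return scores
```

Note that the branch order matches the SQL: the burn check comes before the rank-1 check, so the fixed burn share is carved out before the winner-take-most split of the remaining 20%.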
85 changes: 67 additions & 18 deletions neurons/validator/db/tests/test_db_operations_part_1.py
@@ -13,6 +13,12 @@
from neurons.validator.models.miner import MinersModel
from neurons.validator.models.prediction import PredictionExportedStatus, PredictionsModel
from neurons.validator.models.score import ScoresModel
from neurons.validator.tasks.metagraph_scoring import (
BURN_WEIGHT,
DECAY_POWER,
MOVING_AVERAGE_EVENTS,
WINNER_WEIGHT,
)
from neurons.validator.utils.common.interval import SCORING_WINDOW_INTERVALS
from neurons.validator.utils.logger.logger import NuminousLogger

@@ -2435,10 +2441,11 @@ async def test_set_metagraph_scores(
]
await db_operations.upsert_events(events)

# Create scores for 3 miners across 3 events
# Miner 10: Brier scores [0.04, 0.09, 0.16] → avg = 0.0967 (best)
# Create scores for 4 miners across 3 events (including burn UID 239)
# Miner 10: Brier scores [0.04, 0.09, 0.16] → avg = 0.0967 (best non-burn)
# Miner 20: Brier scores [0.25, 0.36, 0.49] → avg = 0.3667 (worst)
# Miner 30: Brier scores [0.09, 0.16, 0.25] → avg = 0.1667
# Miner 239: Brier scores [1.00, 1.00, 1.00] → avg = 1.0 (burn miner, gets 80% regardless)
scores = [
# Event 1 (outcome=1)
ScoresModel(
@@ -2465,6 +2472,14 @@
event_score=0.09, # (0.7 - 1)^2 = 0.09
spec_version=1,
),
ScoresModel(
event_id="event1",
miner_uid=239,
miner_hotkey="hk239",
prediction=0.0,
event_score=1.00, # (0.0 - 1)^2 = 1.00 (worst possible)
spec_version=1,
),
# Event 2 (outcome=0)
ScoresModel(
event_id="event2",
@@ -2490,6 +2505,14 @@
event_score=0.16, # (0.4 - 0)^2 = 0.16
spec_version=1,
),
ScoresModel(
event_id="event2",
miner_uid=239,
miner_hotkey="hk239",
prediction=1.0,
event_score=1.00, # (1.0 - 0)^2 = 1.00 (worst possible)
spec_version=1,
),
# Event 3 (outcome=1)
ScoresModel(
event_id="event3",
@@ -2515,59 +2538,85 @@
event_score=0.25, # (0.5 - 1)^2 = 0.25
spec_version=1,
),
ScoresModel(
event_id="event3",
miner_uid=239,
miner_hotkey="hk239",
prediction=0.0,
event_score=1.00, # (0.0 - 1)^2 = 1.00 (worst possible)
spec_version=1,
),
]
await db_operations.insert_scores(scores)

raw_scores = await db_client.many(
"SELECT event_id, miner_uid FROM scores ORDER BY event_id, miner_uid"
)
assert len(raw_scores) == 9
assert len(raw_scores) == 12

updated = await db_operations.set_metagraph_scores(
event_id="event3", n_events=5, winner_weight=0.99, decay_power=1.0
event_id="event3",
n_events=MOVING_AVERAGE_EVENTS,
burn_weight=BURN_WEIGHT,
winner_weight=WINNER_WEIGHT,
decay_power=DECAY_POWER,
burn_uid=239,
)
assert updated == []

actual_rows = await db_client.many(
"SELECT event_id, miner_uid, processed, metagraph_score, other_data FROM scores ORDER BY event_id, miner_uid",
use_row_factory=True,
)
assert len(actual_rows) == 9
assert len(actual_rows) == 12

for i in range(6):
for i in range(8):
assert actual_rows[i]["processed"] == 0
assert actual_rows[i]["metagraph_score"] is None
assert actual_rows[i]["other_data"] is None

# Event 3 should be updated with power decay distribution
# Miner 10 should win (lowest avg Brier = 0.0967) → gets 99%
miner10_row = actual_rows[6] # event3, miner_uid=10
# Event 3 should be updated with burn mechanism:
# UID 239 gets BURN_WEIGHT (80%), remaining 20% uses winner-take-all (99%/1% split)

# Miner 10: best non-burn (avg = 0.0967, rank 1)
# → gets (1 - BURN_WEIGHT) * WINNER_WEIGHT = 0.20 * 0.99 = 0.198
miner10_row = actual_rows[8] # event3, miner_uid=10
assert miner10_row["event_id"] == "event3"
assert miner10_row["miner_uid"] == 10
assert miner10_row["processed"] == 1
assert miner10_row["metagraph_score"] == pytest.approx(0.99, abs=1e-3)
assert miner10_row["metagraph_score"] == pytest.approx(0.198, abs=1e-3)
miner10_data = json.loads(miner10_row["other_data"])
assert miner10_data["average_brier_score"] == pytest.approx(0.0967, abs=1e-3)
assert miner10_data["rank"] == 1

# Miner 20 should be last (highest avg Brier = 0.3667) → rank 3
# Gets: 0.01 * (1/3) / (1/2 + 1/3) = 0.004
miner20_row = actual_rows[7] # event3, miner_uid=20
# Miner 20: worst non-burn (avg = 0.3667, rank 3)
# → gets (1 - BURN_WEIGHT) * (1 - WINNER_WEIGHT) * (1/3) / (1/2 + 1/3) = 0.20 * 0.01 * 0.4 = 0.0008
miner20_row = actual_rows[9] # event3, miner_uid=20
assert miner20_row["event_id"] == "event3"
assert miner20_row["miner_uid"] == 20
assert miner20_row["processed"] == 1
assert miner20_row["metagraph_score"] == pytest.approx(0.004, abs=1e-3)
assert miner20_row["metagraph_score"] == pytest.approx(0.0008, abs=1e-4)
miner20_data = json.loads(miner20_row["other_data"])
assert miner20_data["average_brier_score"] == pytest.approx(0.3667, abs=1e-3)
assert miner20_data["rank"] == 3

# Miner 30 should be middle (avg Brier = 0.1667) → rank 2
# Gets: 0.01 * (1/2) / (1/2 + 1/3) = 0.006
miner30_row = actual_rows[8] # event3, miner_uid=30
# Miner 30: middle (avg = 0.1667, rank 2)
# → gets (1 - BURN_WEIGHT) * (1 - WINNER_WEIGHT) * (1/2) / (1/2 + 1/3) = 0.20 * 0.01 * 0.6 = 0.0012
miner30_row = actual_rows[10] # event3, miner_uid=30
assert miner30_row["event_id"] == "event3"
assert miner30_row["miner_uid"] == 30
assert miner30_row["processed"] == 1
assert miner30_row["metagraph_score"] == pytest.approx(0.006, abs=1e-3)
assert miner30_row["metagraph_score"] == pytest.approx(0.0012, abs=1e-4)
miner30_data = json.loads(miner30_row["other_data"])
assert miner30_data["average_brier_score"] == pytest.approx(0.1667, abs=1e-3)
assert miner30_data["rank"] == 2

# Miner 239: burn miner (avg = 1.0, rank 4 worst) → gets fixed BURN_WEIGHT
miner239_row = actual_rows[11] # event3, miner_uid=239
assert miner239_row["event_id"] == "event3"
assert miner239_row["miner_uid"] == 239
assert miner239_row["processed"] == 1
assert miner239_row["metagraph_score"] == pytest.approx(BURN_WEIGHT, abs=1e-3)
miner239_data = json.loads(miner239_row["other_data"])
assert miner239_data["average_brier_score"] == pytest.approx(1.0, abs=1e-3)
assert miner239_data["rank"] == 4
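As a quick sanity check on the expected values asserted above (back-of-the-envelope only, not part of the test suite), the four event3 metagraph scores should sum to 1.0:

```python
BURN_WEIGHT, WINNER_WEIGHT = 0.80, 0.99

burn = BURN_WEIGHT                                                            # miner 239
winner = (1 - BURN_WEIGHT) * WINNER_WEIGHT                                    # miner 10 -> 0.198
rank2 = (1 - BURN_WEIGHT) * (1 - WINNER_WEIGHT) * (1 / 2) / (1 / 2 + 1 / 3)   # miner 30 -> 0.0012
rank3 = (1 - BURN_WEIGHT) * (1 - WINNER_WEIGHT) * (1 / 3) / (1 / 2 + 1 / 3)   # miner 20 -> 0.0008

assert abs(burn + winner + rank2 + rank3 - 1.0) < 1e-9  # weights sum to 1
```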
1 change: 1 addition & 0 deletions neurons/validator/main.py
@@ -155,6 +155,7 @@ async def main():
db_operations=db_operations,
page_size=1000,
logger=logger,
metagraph=bt_metagraph,
)

export_scores_task = ExportScores(
29 changes: 27 additions & 2 deletions neurons/validator/tasks/metagraph_scoring.py
@@ -1,9 +1,12 @@
from neurons.validator.db.operations import DatabaseOperations
from neurons.validator.scheduler.task import AbstractTask
from neurons.validator.utils.common.converters import torch_or_numpy_to_int
from neurons.validator.utils.if_metagraph import IfMetagraph
from neurons.validator.utils.logger.logger import NuminousLogger

MOVING_AVERAGE_EVENTS = 101 # How many previous events to consider for the moving average
WINNER_WEIGHT = 0.99 # Winner gets this percentage
BURN_WEIGHT = 0.80 # Burn UID gets this percentage
WINNER_WEIGHT = 0.99 # Winner gets this percentage of remaining (after burn)
DECAY_POWER = 1.0 # Decay steepness: 1.0=inverse rank, 1.5=steeper, 0.5=gentler


@@ -12,13 +15,15 @@ class MetagraphScoring(AbstractTask):
page_size: int
db_operations: DatabaseOperations
logger: NuminousLogger
metagraph: IfMetagraph

def __init__(
self,
interval_seconds: float,
page_size: int,
db_operations: DatabaseOperations,
logger: NuminousLogger,
metagraph: IfMetagraph,
):
if not isinstance(interval_seconds, float) or interval_seconds <= 0:
raise ValueError("interval_seconds must be a positive number (float).")
@@ -30,6 +35,7 @@ def __init__(
self.interval = interval_seconds
self.page_size = page_size
self.db_operations = db_operations
self.metagraph = metagraph

self.errors_count = 0
self.logger = logger
@@ -42,16 +48,33 @@ def name(self):
def interval_seconds(self):
return self.interval

def get_owner_neuron_uid(self) -> int:
owner_uid = None
owner_hotkey = self.metagraph.owner_hotkey

for idx, uid in enumerate(self.metagraph.uids):
int_uid = torch_or_numpy_to_int(uid)
hotkey = self.metagraph.hotkeys[idx]

if hotkey == owner_hotkey:
owner_uid = int_uid
break

assert owner_uid is not None, "Owner uid not found in metagraph uids"

return owner_uid

async def run(self):
events_to_score = await self.db_operations.get_events_for_metagraph_scoring(
max_events=self.page_size
)
if not events_to_score:
self.logger.debug("No events to calculate metagraph scores.")
else:
burn_uid = self.get_owner_neuron_uid()
self.logger.debug(
"Found events to calculate metagraph scores.",
extra={"n_events": len(events_to_score)},
extra={"n_events": len(events_to_score), "burn_uid": burn_uid},
)

for event in events_to_score:
@@ -64,8 +87,10 @@ async def run(self):
res = await self.db_operations.set_metagraph_scores(
event["event_id"],
n_events=MOVING_AVERAGE_EVENTS,
burn_weight=BURN_WEIGHT,
winner_weight=WINNER_WEIGHT,
decay_power=DECAY_POWER,
burn_uid=burn_uid,
)
if res == []:
self.logger.debug(
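The burn UID itself is not hard-coded in this task: get_owner_neuron_uid looks up the subnet owner's hotkey in the metagraph and returns the matching UID (239 in the changelog and the tests). Below is a minimal sketch of that lookup against a hypothetical stand-in metagraph; the names fake_metagraph and resolve_burn_uid are illustrative only, and the real method also routes the UID through torch_or_numpy_to_int.

```python
from types import SimpleNamespace

# Hypothetical stand-in for the IfMetagraph instance the task now receives.
fake_metagraph = SimpleNamespace(
    owner_hotkey="hk239",
    uids=[10, 20, 30, 239],
    hotkeys=["hk10", "hk20", "hk30", "hk239"],
)


def resolve_burn_uid(metagraph) -> int:
    """Same lookup as MetagraphScoring.get_owner_neuron_uid, minus tensor conversion."""
    for idx, uid in enumerate(metagraph.uids):
        if metagraph.hotkeys[idx] == metagraph.owner_hotkey:
            return int(uid)
    raise AssertionError("Owner uid not found in metagraph uids")


assert resolve_burn_uid(fake_metagraph) == 239
```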