diff --git a/CHANGELOG.md b/CHANGELOG.md index dc66530..74af97d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Release Notes +## [2.0.2] - 2025-11-30 +- **Validator**: Implement burn mechanism - UID 239 receives 80% of emissions, remaining miners share 20% + ## [2.0.1] - 2025-11-28 - **Validator**: Added validator synchronization hour configuration diff --git a/neurons/validator/db/operations.py b/neurons/validator/db/operations.py index ce73964..ed74eb1 100644 --- a/neurons/validator/db/operations.py +++ b/neurons/validator/db/operations.py @@ -858,7 +858,13 @@ async def get_events_for_metagraph_scoring(self, max_events: int = 1000) -> list return events async def set_metagraph_scores( - self, event_id: str, n_events: int, winner_weight: float, decay_power: float + self, + event_id: str, + n_events: int, + burn_weight: float, + winner_weight: float, + decay_power: float, + burn_uid: int, ) -> list: """ Calculate the moving average of metagraph scores for a given event @@ -869,8 +875,10 @@ async def set_metagraph_scores( parameters={ "event_id": event_id, "n_events": n_events, + "burn_weight": burn_weight, "winner_weight": winner_weight, "decay_power": decay_power, + "burn_uid": burn_uid, }, ) diff --git a/neurons/validator/db/sql/metagraph_score.sql b/neurons/validator/db/sql/metagraph_score.sql index 10533f8..5a64602 100644 --- a/neurons/validator/db/sql/metagraph_score.sql +++ b/neurons/validator/db/sql/metagraph_score.sql @@ -52,17 +52,18 @@ power_rank_sum AS ( -- Calculate sum of power-adjusted ranks for non-winners (for normalization) SELECT SUM(POWER(rank, -:decay_power)) as total_power_rank FROM ranked_miners - WHERE rank > 1 + WHERE miner_uid != :burn_uid AND rank > 1 ), payload AS ( - -- Assign metagraph score: winner_weight to rank 1, remainder distributed by power rank + -- Assign metagraph score: burn_weight to burn UID, winner_weight to rank 1, remainder distributed by power rank SELECT rm.miner_uid, rm.miner_hotkey, CASE - WHEN 
rm.rank = 1 THEN :winner_weight + WHEN rm.miner_uid = :burn_uid THEN :burn_weight + WHEN rm.rank = 1 THEN (1.0 - :burn_weight) * :winner_weight ELSE ( - (1.0 - :winner_weight) + (1.0 - :burn_weight) * (1.0 - :winner_weight) * POWER(rm.rank, -:decay_power) / (SELECT total_power_rank FROM power_rank_sum) ) diff --git a/neurons/validator/db/tests/test_db_operations_part_1.py b/neurons/validator/db/tests/test_db_operations_part_1.py index 8b3121d..0523935 100644 --- a/neurons/validator/db/tests/test_db_operations_part_1.py +++ b/neurons/validator/db/tests/test_db_operations_part_1.py @@ -13,6 +13,12 @@ from neurons.validator.models.miner import MinersModel from neurons.validator.models.prediction import PredictionExportedStatus, PredictionsModel from neurons.validator.models.score import ScoresModel +from neurons.validator.tasks.metagraph_scoring import ( + BURN_WEIGHT, + DECAY_POWER, + MOVING_AVERAGE_EVENTS, + WINNER_WEIGHT, +) from neurons.validator.utils.common.interval import SCORING_WINDOW_INTERVALS from neurons.validator.utils.logger.logger import NuminousLogger @@ -2435,10 +2441,11 @@ async def test_set_metagraph_scores( ] await db_operations.upsert_events(events) - # Create scores for 3 miners across 3 events - # Miner 10: Brier scores [0.04, 0.09, 0.16] → avg = 0.0967 (best) + # Create scores for 4 miners across 3 events (including burn UID 239) + # Miner 10: Brier scores [0.04, 0.09, 0.16] → avg = 0.0967 (best non-burn) # Miner 20: Brier scores [0.25, 0.36, 0.49] → avg = 0.3667 (worst) # Miner 30: Brier scores [0.09, 0.16, 0.25] → avg = 0.1667 + # Miner 239: Brier scores [1.00, 1.00, 1.00] → avg = 1.0 (burn miner, gets 80% regardless) scores = [ # Event 1 (outcome=1) ScoresModel( @@ -2465,6 +2472,14 @@ async def test_set_metagraph_scores( event_score=0.09, # (0.7 - 1)^2 = 0.09 spec_version=1, ), + ScoresModel( + event_id="event1", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, # (0.0 - 1)^2 = 1.00 (worst possible) + 
spec_version=1, + ), # Event 2 (outcome=0) ScoresModel( event_id="event2", @@ -2490,6 +2505,14 @@ async def test_set_metagraph_scores( event_score=0.16, # (0.4 - 0)^2 = 0.16 spec_version=1, ), + ScoresModel( + event_id="event2", + miner_uid=239, + miner_hotkey="hk239", + prediction=1.0, + event_score=1.00, # (1.0 - 0)^2 = 1.00 (worst possible) + spec_version=1, + ), # Event 3 (outcome=1) ScoresModel( event_id="event3", @@ -2515,16 +2538,29 @@ async def test_set_metagraph_scores( event_score=0.25, # (0.5 - 1)^2 = 0.25 spec_version=1, ), + ScoresModel( + event_id="event3", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, # (0.0 - 1)^2 = 1.00 (worst possible) + spec_version=1, + ), ] await db_operations.insert_scores(scores) raw_scores = await db_client.many( "SELECT event_id, miner_uid FROM scores ORDER BY event_id, miner_uid" ) - assert len(raw_scores) == 9 + assert len(raw_scores) == 12 updated = await db_operations.set_metagraph_scores( - event_id="event3", n_events=5, winner_weight=0.99, decay_power=1.0 + event_id="event3", + n_events=MOVING_AVERAGE_EVENTS, + burn_weight=BURN_WEIGHT, + winner_weight=WINNER_WEIGHT, + decay_power=DECAY_POWER, + burn_uid=239, ) assert updated == [] @@ -2532,42 +2568,55 @@ async def test_set_metagraph_scores( "SELECT event_id, miner_uid, processed, metagraph_score, other_data FROM scores ORDER BY event_id, miner_uid", use_row_factory=True, ) - assert len(actual_rows) == 9 + assert len(actual_rows) == 12 - for i in range(6): + for i in range(8): assert actual_rows[i]["processed"] == 0 assert actual_rows[i]["metagraph_score"] is None assert actual_rows[i]["other_data"] is None - # Event 3 should be updated with power decay distribution - # Miner 10 should win (lowest avg Brier = 0.0967) → gets 99% - miner10_row = actual_rows[6] # event3, miner_uid=10 + # Event 3 should be updated with burn mechanism: + # UID 239 gets BURN_WEIGHT (80%), remaining 20% uses winner-take-all (99%/1% split) + + # Miner 10: best 
non-burn (avg = 0.0967, rank 1) + # → gets (1 - BURN_WEIGHT) * WINNER_WEIGHT = 0.20 * 0.99 = 0.198 + miner10_row = actual_rows[8] # event3, miner_uid=10 assert miner10_row["event_id"] == "event3" assert miner10_row["miner_uid"] == 10 assert miner10_row["processed"] == 1 - assert miner10_row["metagraph_score"] == pytest.approx(0.99, abs=1e-3) + assert miner10_row["metagraph_score"] == pytest.approx(0.198, abs=1e-3) miner10_data = json.loads(miner10_row["other_data"]) assert miner10_data["average_brier_score"] == pytest.approx(0.0967, abs=1e-3) assert miner10_data["rank"] == 1 - # Miner 20 should be last (highest avg Brier = 0.3667) → rank 3 - # Gets: 0.01 * (1/3) / (1/2 + 1/3) = 0.004 - miner20_row = actual_rows[7] # event3, miner_uid=20 + # Miner 20: worst non-burn (avg = 0.3667, rank 3) + # → gets (1 - BURN_WEIGHT) * (1 - WINNER_WEIGHT) * (1/3) / (1/2 + 1/3) = 0.20 * 0.01 * 0.4 = 0.0008 + miner20_row = actual_rows[9] # event3, miner_uid=20 assert miner20_row["event_id"] == "event3" assert miner20_row["miner_uid"] == 20 assert miner20_row["processed"] == 1 - assert miner20_row["metagraph_score"] == pytest.approx(0.004, abs=1e-3) + assert miner20_row["metagraph_score"] == pytest.approx(0.0008, abs=1e-4) miner20_data = json.loads(miner20_row["other_data"]) assert miner20_data["average_brier_score"] == pytest.approx(0.3667, abs=1e-3) assert miner20_data["rank"] == 3 - # Miner 30 should be middle (avg Brier = 0.1667) → rank 2 - # Gets: 0.01 * (1/2) / (1/2 + 1/3) = 0.006 - miner30_row = actual_rows[8] # event3, miner_uid=30 + # Miner 30: middle (avg = 0.1667, rank 2) + # → gets (1 - BURN_WEIGHT) * (1 - WINNER_WEIGHT) * (1/2) / (1/2 + 1/3) = 0.20 * 0.01 * 0.6 = 0.0012 + miner30_row = actual_rows[10] # event3, miner_uid=30 assert miner30_row["event_id"] == "event3" assert miner30_row["miner_uid"] == 30 assert miner30_row["processed"] == 1 - assert miner30_row["metagraph_score"] == pytest.approx(0.006, abs=1e-3) + assert miner30_row["metagraph_score"] == 
pytest.approx(0.0012, abs=1e-4) miner30_data = json.loads(miner30_row["other_data"]) assert miner30_data["average_brier_score"] == pytest.approx(0.1667, abs=1e-3) assert miner30_data["rank"] == 2 + + # Miner 239: burn miner (avg = 1.0, rank 4 worst) → gets fixed BURN_WEIGHT + miner239_row = actual_rows[11] # event3, miner_uid=239 + assert miner239_row["event_id"] == "event3" + assert miner239_row["miner_uid"] == 239 + assert miner239_row["processed"] == 1 + assert miner239_row["metagraph_score"] == pytest.approx(BURN_WEIGHT, abs=1e-3) + miner239_data = json.loads(miner239_row["other_data"]) + assert miner239_data["average_brier_score"] == pytest.approx(1.0, abs=1e-3) + assert miner239_data["rank"] == 4 diff --git a/neurons/validator/main.py b/neurons/validator/main.py index 9cbb33e..8c471c5 100644 --- a/neurons/validator/main.py +++ b/neurons/validator/main.py @@ -155,6 +155,7 @@ async def main(): db_operations=db_operations, page_size=1000, logger=logger, + metagraph=bt_metagraph, ) export_scores_task = ExportScores( diff --git a/neurons/validator/tasks/metagraph_scoring.py b/neurons/validator/tasks/metagraph_scoring.py index 2f12711..7d654b7 100644 --- a/neurons/validator/tasks/metagraph_scoring.py +++ b/neurons/validator/tasks/metagraph_scoring.py @@ -1,9 +1,12 @@ from neurons.validator.db.operations import DatabaseOperations from neurons.validator.scheduler.task import AbstractTask +from neurons.validator.utils.common.converters import torch_or_numpy_to_int +from neurons.validator.utils.if_metagraph import IfMetagraph from neurons.validator.utils.logger.logger import NuminousLogger MOVING_AVERAGE_EVENTS = 101 # How many previous events to consider for the moving average -WINNER_WEIGHT = 0.99 # Winner gets this percentage +BURN_WEIGHT = 0.80 # Burn UID gets this percentage +WINNER_WEIGHT = 0.99 # Winner gets this percentage of remaining (after burn) DECAY_POWER = 1.0 # Decay steepness: 1.0=inverse rank, 1.5=steeper, 0.5=gentler @@ -12,6 +15,7 @@ class 
MetagraphScoring(AbstractTask): page_size: int db_operations: DatabaseOperations logger: NuminousLogger + metagraph: IfMetagraph def __init__( self, @@ -19,6 +23,7 @@ def __init__( page_size: int, db_operations: DatabaseOperations, logger: NuminousLogger, + metagraph: IfMetagraph, ): if not isinstance(interval_seconds, float) or interval_seconds <= 0: raise ValueError("interval_seconds must be a positive number (float).") @@ -30,6 +35,7 @@ def __init__( self.interval = interval_seconds self.page_size = page_size self.db_operations = db_operations + self.metagraph = metagraph self.errors_count = 0 self.logger = logger @@ -42,6 +48,22 @@ def name(self): def interval_seconds(self): return self.interval + def get_owner_neuron_uid(self) -> int: + owner_uid = None + owner_hotkey = self.metagraph.owner_hotkey + + for idx, uid in enumerate(self.metagraph.uids): + int_uid = torch_or_numpy_to_int(uid) + hotkey = self.metagraph.hotkeys[idx] + + if hotkey == owner_hotkey: + owner_uid = int_uid + break + + assert owner_uid is not None, "Owner uid not found in metagraph uids" + + return owner_uid + async def run(self): events_to_score = await self.db_operations.get_events_for_metagraph_scoring( max_events=self.page_size @@ -49,9 +71,10 @@ async def run(self): if not events_to_score: self.logger.debug("No events to calculate metagraph scores.") else: + burn_uid = self.get_owner_neuron_uid() self.logger.debug( "Found events to calculate metagraph scores.", - extra={"n_events": len(events_to_score)}, + extra={"n_events": len(events_to_score), "burn_uid": burn_uid}, ) for event in events_to_score: @@ -64,8 +87,10 @@ async def run(self): res = await self.db_operations.set_metagraph_scores( event["event_id"], n_events=MOVING_AVERAGE_EVENTS, + burn_weight=BURN_WEIGHT, winner_weight=WINNER_WEIGHT, decay_power=DECAY_POWER, + burn_uid=burn_uid, ) if res == []: self.logger.debug( diff --git a/neurons/validator/tasks/tests/test_metagraph_scoring.py 
b/neurons/validator/tasks/tests/test_metagraph_scoring.py index 77e057d..29da2a7 100644 --- a/neurons/validator/tasks/tests/test_metagraph_scoring.py +++ b/neurons/validator/tasks/tests/test_metagraph_scoring.py @@ -1,6 +1,7 @@ import json from unittest.mock import AsyncMock, MagicMock +import numpy as np import pytest from freezegun import freeze_time @@ -25,6 +26,10 @@ def metagraph_scoring_task( db_operations: DatabaseOperations, ): logger = MagicMock(spec=NuminousLogger) + metagraph = MagicMock() + metagraph.owner_hotkey = "test_owner_hotkey" + metagraph.uids = np.array([239, 10, 20, 30]) + metagraph.hotkeys = ["test_owner_hotkey", "hk10", "hk20", "hk30"] with freeze_time("2025-01-02 03:00:00"): return MetagraphScoring( @@ -32,6 +37,7 @@ def metagraph_scoring_task( page_size=100, db_operations=db_operations, logger=logger, + metagraph=metagraph, ) def test_init(self, metagraph_scoring_task: MetagraphScoring): @@ -99,23 +105,42 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): spec_version=1, processed=False, ), + ScoresModel( + event_id="expected_event_id", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), ], [ { "event_id": "expected_event_id", "processed": 1, - "metagraph_score": 0.99, # Only miner, rank=1, wins (99%) + "metagraph_score": 0.198, # Rank 1, gets (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.80, "rank": 1, }, }, + { + "event_id": "expected_event_id", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 2, + }, + }, ], { "debug": [ ( "Found events to calculate metagraph scores.", - {"n_events": 1}, + {"n_events": 1, "burn_uid": 239}, ), ( "Processing event for metagraph scoring.", @@ -152,12 +177,22 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): spec_version=1, processed=False, ), + ScoresModel( + 
event_id="expected_event_id", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), ], [ { "event_id": "expected_event_id", "processed": 1, - "metagraph_score": 0.01, # Rank 2, gets 1% + "metagraph_score": 0.002, # Rank 2, only non-winner, gets all (1-0.80)*(1-0.99) = 0.002 "other_data": { "average_brier_score": 0.80, "rank": 2, @@ -166,18 +201,27 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id", "processed": 1, - "metagraph_score": 0.99, # Rank 1, wins (99%) + "metagraph_score": 0.198, # Rank 1, gets (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.40, "rank": 1, }, }, + { + "event_id": "expected_event_id", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 3, + }, + }, ], { "debug": [ ( "Found events to calculate metagraph scores.", - {"n_events": 1}, + {"n_events": 1, "burn_uid": 239}, ), ( "Processing event for metagraph scoring.", @@ -210,7 +254,7 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): "debug": [ ( "Found events to calculate metagraph scores.", - {"n_events": 1}, + {"n_events": 1, "burn_uid": 239}, ), ( "Processing event for metagraph scoring.", @@ -233,7 +277,7 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): miner_uid=3, miner_hotkey="hk3", prediction=0.75, - event_score=0.80, # Rank 3 (worst) + event_score=0.80, # Rank 3 (worst non-burn) created_at="2025-01-02 03:00:00", spec_version=1, processed=False, @@ -253,7 +297,17 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): miner_uid=5, miner_hotkey="hk5", prediction=0.75, - event_score=0.10, # Rank 1 (best, lowest Brier) → Winner + event_score=0.10, # Rank 1 (best) + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), + ScoresModel( + event_id="expected_event_id", + miner_uid=239, + 
miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, created_at="2025-01-02 03:00:00", spec_version=1, processed=False, ), @@ -263,7 +317,7 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id", "processed": 1, - "metagraph_score": 0.004, # Rank 3: 1% * (1/3) / (1/2 + 1/3) + "metagraph_score": 0.0008, # Rank 3: (1-0.80)*(1-0.99)*(1/3)/(1/2+1/3) ≈ 0.0008 "other_data": { "average_brier_score": 0.80, "rank": 3, @@ -272,7 +326,7 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id", "processed": 1, - "metagraph_score": 0.006, # Rank 2: 1% * (1/2) / (1/2 + 1/3) + "metagraph_score": 0.0012, # Rank 2: (1-0.80)*(1-0.99)*(1/2)/(1/2+1/3) ≈ 0.0012 "other_data": { "average_brier_score": 0.40, "rank": 2, @@ -281,18 +335,27 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id", "processed": 1, - "metagraph_score": 0.99, # Rank 1, wins (99%) + "metagraph_score": 0.198, # Rank 1: (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.10, "rank": 1, }, }, + { + "event_id": "expected_event_id", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 4, + }, + }, ], { "debug": [ ( "Found events to calculate metagraph scores.", - {"n_events": 1}, + {"n_events": 1, "burn_uid": 239}, ), ( "Processing event for metagraph scoring.", @@ -319,6 +382,16 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): spec_version=1, processed=False, ), + ScoresModel( + event_id="expected_event_id_1", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), ScoresModel( event_id="expected_event_id_2", miner_uid=3, @@ -339,21 +412,40 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): spec_version=1, processed=False, ), + ScoresModel( + event_id="expected_event_id_2",
+ miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), ], [ { "event_id": "expected_event_id_1", "processed": 1, - "metagraph_score": 0.99, # Only miner, wins (99%) + "metagraph_score": 0.198, # Miner 3: rank 1, gets (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.80, "rank": 1, }, }, + { + "event_id": "expected_event_id_1", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 2, + }, + }, { "event_id": "expected_event_id_2", "processed": 1, - "metagraph_score": 0.01, # Miner 3: avg=(0.80+0.40)/2=0.60, rank 2, gets 1% + "metagraph_score": 0.002, # Miner 3: avg=(0.80+0.40)/2=0.60, rank 2, only non-winner, gets all (1-0.80)*(1-0.99) = 0.002 "other_data": { "average_brier_score": 0.60, "rank": 2, @@ -362,18 +454,27 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id_2", "processed": 1, - "metagraph_score": 0.99, # Miner 4: avg=0.40, rank 1, wins (99%) + "metagraph_score": 0.198, # Miner 4: avg=0.40, rank 1, gets (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.40, "rank": 1, }, }, + { + "event_id": "expected_event_id_2", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 3, + }, + }, ], { "debug": [ ( "Found events to calculate metagraph scores.", - {"n_events": 2}, + {"n_events": 2, "burn_uid": 239}, ), ( "Processing event for metagraph scoring.", @@ -408,6 +509,16 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): spec_version=1, processed=False, ), + ScoresModel( + event_id="expected_event_id_1", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), ScoresModel( event_id="expected_event_id_2", miner_uid=3, @@ -453,40 +564,64 @@ def 
test_init(self, metagraph_scoring_task: MetagraphScoring): miner_uid=5, miner_hotkey="hk5", prediction=0.75, - event_score=0.10, # Very low Brier (excellent prediction!) + event_score=0.10, created_at="2025-01-02 03:00:00", spec_version=1, processed=False, ), + ScoresModel( + event_id="expected_event_id_2", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, + created_at="2025-01-02 03:00:00", + spec_version=1, + processed=False, + ), + ScoresModel( + event_id="expected_event_id_3", + miner_uid=239, + miner_hotkey="hk239", + prediction=0.0, + event_score=1.00, created_at="2025-01-02 03:00:00", spec_version=1, processed=False, ), ], [ - # Event 1: Only miner 3 { "event_id": "expected_event_id_1", "processed": 1, - "metagraph_score": 0.99, # Rank 1, wins (99%) + "metagraph_score": 0.198, # Miner 3: rank 1, gets (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.80, "rank": 1, }, }, - # Event 2: Miner 3 avg=0.60, Miner 4 avg=0.40, Miner 5 avg=0.10 - # Order: 5(0.10) < 4(0.40) < 3(0.60) + { + "event_id": "expected_event_id_1", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 2, + }, + }, { "event_id": "expected_event_id_2", "processed": 1, - "metagraph_score": 0.004, # Miner 3: Rank 3, gets 0.4% + "metagraph_score": 0.0008, # Miner 3: avg=0.60, rank 3, gets (1-0.80)*(1-0.99)*(1/3)/(1/2+1/3) ≈ 0.0008 "other_data": { "average_brier_score": 0.60, "rank": 3, }, }, - # Event 3: Miner 3 avg=0.60, Miner 4 avg=0.40, Miner 5 avg=0.10 (still in window!)
- # Order: 5(0.10) < 4(0.40) < 3(0.60) { "event_id": "expected_event_id_3", "processed": 1, - "metagraph_score": 0.004, # Miner 3: Rank 3, gets 0.4% + "metagraph_score": 0.0008, # Miner 3: avg=0.60, rank 3 "other_data": { "average_brier_score": 0.60, "rank": 3, @@ -495,7 +630,7 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id_2", "processed": 1, - "metagraph_score": 0.006, # Miner 4: Rank 2, gets 0.6% + "metagraph_score": 0.0012, # Miner 4: avg=0.40, rank 2, gets (1-0.80)*(1-0.99)*(1/2)/(1/2+1/3) ≈ 0.0012 "other_data": { "average_brier_score": 0.40, "rank": 2, @@ -504,7 +639,7 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id_3", "processed": 1, - "metagraph_score": 0.006, # Miner 4: Rank 2, gets 0.6% + "metagraph_score": 0.0012, # Miner 4: avg=0.40, rank 2 "other_data": { "average_brier_score": 0.40, "rank": 2, @@ -513,18 +648,36 @@ def test_init(self, metagraph_scoring_task: MetagraphScoring): { "event_id": "expected_event_id_2", "processed": 1, - "metagraph_score": 0.99, # Miner 5: Rank 1, wins (99%) + "metagraph_score": 0.198, # Miner 5: avg=0.10, rank 1, gets (1-0.80)*0.99 = 0.198 "other_data": { "average_brier_score": 0.10, "rank": 1, }, }, + { + "event_id": "expected_event_id_2", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 4, + }, + }, + { + "event_id": "expected_event_id_3", + "processed": 1, + "metagraph_score": 0.8, # UID 239 gets 80% + "other_data": { + "average_brier_score": 1.00, + "rank": 4, + }, + }, ], { "debug": [ ( "Found events to calculate metagraph scores.", - {"n_events": 3}, + {"n_events": 3, "burn_uid": 239}, ), ( "Processing event for metagraph scoring.", diff --git a/neurons/validator/version.py b/neurons/validator/version.py index 5489182..3aeb858 100644 --- a/neurons/validator/version.py +++ b/neurons/validator/version.py @@ -1,4 +1,4 @@ -__version__ = "2.0.1" 
+__version__ = "2.0.2" version_split = __version__.split(".")