Skip to content

Commit f81a89a

Browse files
constantinius and claude committed
fix(ai-monitoring): Rename LLM -> AI, add aiModelMetadata to GlobalConfig
Rename all LLM prefixes to AI for consistency with existing naming:

- LLMModelCost -> AIModelCost
- LLMModelMetadata -> AIModelMetadata
- LLMModelMetadataConfig -> AIModelMetadataConfig
- llm-model-metadata:v1 -> ai-model-metadata:v1
- fetch_llm_model_metadata -> fetch_ai_model_metadata
- llm_model_metadata_config -> ai_model_metadata_config

Add aiModelMetadata to GlobalConfig alongside aiModelCosts. Relay's
normalize_global_config strips unknown fields, so the relay globalconfig
tests pop aiModelMetadata before comparing, until Relay adds support.

Restore the original fetch_ai_model_costs code untouched from master.
The new fetch_ai_model_metadata task is appended at the bottom of the file.

Co-Authored-By: Claude Sonnet 4 <noreply@anthropic.com>
1 parent 88c53d6 commit f81a89a

File tree

6 files changed

+428
-245
lines changed

6 files changed

+428
-245
lines changed

src/sentry/conf/server.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1200,13 +1200,13 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str:
12001200
"task": "relocation:sentry.relocation.transfer.find_relocation_transfer_region",
12011201
"schedule": crontab("*/5", "*", "*", "*", "*"),
12021202
},
1203-
# TODO: Remove fetch-ai-model-costs once all consumers have migrated to fetch-llm-model-metadata
1203+
# TODO(constantinius): Remove fetch-ai-model-costs once all consumers have migrated to fetch-ai-model-metadata
12041204
"fetch-ai-model-costs": {
12051205
"task": "ai_agent_monitoring:sentry.tasks.ai_agent_monitoring.fetch_ai_model_costs",
12061206
"schedule": crontab("*/30", "*", "*", "*", "*"),
12071207
},
1208-
"fetch-llm-model-metadata": {
1209-
"task": "ai_agent_monitoring:sentry.tasks.ai_agent_monitoring.fetch_llm_model_metadata",
1208+
"fetch-ai-model-metadata": {
1209+
"task": "ai_agent_monitoring:sentry.tasks.ai_agent_monitoring.fetch_ai_model_metadata",
12101210
"schedule": crontab("*/30", "*", "*", "*", "*"),
12111211
},
12121212
"llm-issue-detection": {

src/sentry/relay/config/ai_model_costs.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -12,18 +12,18 @@
1212

1313

1414
# Legacy cache key for AI model costs (v2 flat format)
15-
# TODO: Remove once all consumers have migrated to LLM_MODEL_METADATA_CACHE_KEY
15+
# TODO(constantinius): Remove once all consumers have migrated to AI_MODEL_METADATA_CACHE_KEY
1616
AI_MODEL_COSTS_CACHE_KEY = "ai-model-costs:v2"
1717
AI_MODEL_COSTS_CACHE_TTL = 30 * 24 * 60 * 60
1818

1919
# Cache key for storing LLM model metadata (v1 nested format)
20-
LLM_MODEL_METADATA_CACHE_KEY = "llm-model-metadata:v1"
20+
AI_MODEL_METADATA_CACHE_KEY = "ai-model-metadata:v1"
2121
# Cache timeout: 30 days (we re-fetch every 30 minutes, so this provides more than enough overlap)
22-
LLM_MODEL_METADATA_CACHE_TTL = 30 * 24 * 60 * 60
22+
AI_MODEL_METADATA_CACHE_TTL = 30 * 24 * 60 * 60
2323

2424

2525
class AIModelCostV2(TypedDict):
26-
"""Legacy flat format. TODO: Remove once all consumers have migrated."""
26+
"""Legacy flat format. TODO(constantinius): Remove once all consumers have migrated."""
2727

2828
inputPerToken: float
2929
outputPerToken: float
@@ -33,34 +33,34 @@ class AIModelCostV2(TypedDict):
3333

3434

3535
class AIModelCosts(TypedDict):
36-
"""Legacy config type. TODO: Remove once all consumers have migrated."""
36+
"""Legacy config type. TODO(constantinius): Remove once all consumers have migrated."""
3737

3838
version: Required[int]
3939
models: Required[dict[ModelId, AIModelCostV2]]
4040

4141

42-
class LLMModelCost(TypedDict):
42+
class AIModelCost(TypedDict):
4343
inputPerToken: float
4444
outputPerToken: float
4545
outputReasoningPerToken: float
4646
inputCachedPerToken: float
4747
inputCacheWritePerToken: float
4848

4949

50-
class LLMModelMetadata(TypedDict, total=False):
51-
costs: Required[LLMModelCost]
50+
class AIModelMetadata(TypedDict, total=False):
51+
costs: Required[AIModelCost]
5252
contextSize: int
5353

5454

55-
class LLMModelMetadataConfig(TypedDict):
55+
class AIModelMetadataConfig(TypedDict):
5656
version: Required[int]
57-
models: Required[dict[ModelId, LLMModelMetadata]]
57+
models: Required[dict[ModelId, AIModelMetadata]]
5858

5959

6060
def ai_model_costs_config() -> AIModelCosts | None:
6161
"""
6262
Legacy: Get AI model costs configuration.
63-
TODO: Remove once all consumers have migrated to llm_model_metadata_config.
63+
TODO(constantinius): Remove once all consumers have migrated to ai_model_metadata_config.
6464
"""
6565
if settings.SENTRY_AIR_GAP:
6666
return None
@@ -75,19 +75,19 @@ def ai_model_costs_config() -> AIModelCosts | None:
7575
return None
7676

7777

78-
def llm_model_metadata_config() -> LLMModelMetadataConfig | None:
78+
def ai_model_metadata_config() -> AIModelMetadataConfig | None:
7979
"""
8080
Get LLM model metadata configuration.
8181
LLM model metadata is set in cache by a cron job,
8282
if there is no metadata, it should be investigated why.
8383
8484
Returns:
85-
LLMModelMetadataConfig containing cost and context size information for LLM models
85+
AIModelMetadataConfig containing cost and context size information for LLM models
8686
"""
8787
if settings.SENTRY_AIR_GAP:
8888
return None
8989

90-
cached_metadata = cache.get(LLM_MODEL_METADATA_CACHE_KEY)
90+
cached_metadata = cache.get(AI_MODEL_METADATA_CACHE_KEY)
9191
if cached_metadata is not None:
9292
return cached_metadata
9393

src/sentry/relay/globalconfig.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,12 @@
11
from typing import Any, TypedDict
22

33
import sentry.options
4-
from sentry.relay.config.ai_model_costs import AIModelCosts, ai_model_costs_config
4+
from sentry.relay.config.ai_model_costs import (
5+
AIModelCosts,
6+
AIModelMetadataConfig,
7+
ai_model_costs_config,
8+
ai_model_metadata_config,
9+
)
510
from sentry.relay.config.measurements import MeasurementsConfig, get_measurements_config
611
from sentry.relay.config.metric_extraction import (
712
MetricExtractionGroups,
@@ -40,7 +45,10 @@ class SpanOpDefaults(TypedDict):
4045

4146
class GlobalConfig(TypedDict, total=False):
4247
measurements: MeasurementsConfig
43-
aiModelCosts: AIModelCosts | None
48+
aiModelCosts: (
49+
AIModelCosts | None
50+
) # TODO(constantinius): Remove once all consumers use aiModelMetadata
51+
aiModelMetadata: AIModelMetadataConfig | None
4452
metricExtraction: MetricExtractionGroups
4553
filters: GenericFiltersConfig | None
4654
spanOpDefaults: SpanOpDefaults
@@ -79,7 +87,8 @@ def get_global_config() -> GlobalConfig:
7987

8088
global_config: GlobalConfig = {
8189
"measurements": get_measurements_config(),
82-
"aiModelCosts": ai_model_costs_config(),
90+
"aiModelCosts": ai_model_costs_config(), # TODO(constantinius): Remove once all consumers use aiModelMetadata
91+
"aiModelMetadata": ai_model_metadata_config(),
8392
"metricExtraction": global_metric_extraction_groups(),
8493
"spanOpDefaults": span_op_defaults(),
8594
}

0 commit comments

Comments (0)