Skip to content

Commit e76851e

Browse files
committed
ref(ai-agents): Prefer gen_ai.response.model over gen_ai.request.model
Swaps agent insight widgets and prebuilt dashboard configs to query gen_ai.response.model instead of gen_ai.request.model, since the response model reflects what the provider actually used.
1 parent f0da4f4 commit e76851e

File tree

5 files changed

+25
-25
lines changed

5 files changed

+25
-25
lines changed

static/app/views/dashboards/utils/prebuiltConfigs/ai/aiAgentsModels.ts

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,11 @@ const FIRST_ROW_WIDGETS = spaceWidgetsEquallyOnRow(
2323
name: '',
2424
conditions: AI_GENERATIONS_FILTER,
2525
fields: [
26-
SpanFields.GEN_AI_REQUEST_MODEL,
26+
SpanFields.GEN_AI_RESPONSE_MODEL,
2727
`sum(${SpanFields.GEN_AI_COST_TOTAL_TOKENS})`,
2828
],
2929
aggregates: [`sum(${SpanFields.GEN_AI_COST_TOTAL_TOKENS})`],
30-
columns: [SpanFields.GEN_AI_REQUEST_MODEL],
30+
columns: [SpanFields.GEN_AI_RESPONSE_MODEL],
3131
fieldAliases: [t('Model'), t('Total Cost')],
3232
orderby: `-sum(${SpanFields.GEN_AI_COST_TOTAL_TOKENS})`,
3333
},
@@ -46,11 +46,11 @@ const FIRST_ROW_WIDGETS = spaceWidgetsEquallyOnRow(
4646
name: '',
4747
conditions: AI_GENERATIONS_FILTER,
4848
fields: [
49-
SpanFields.GEN_AI_REQUEST_MODEL,
49+
SpanFields.GEN_AI_RESPONSE_MODEL,
5050
`sum(${SpanFields.GEN_AI_USAGE_TOTAL_TOKENS})`,
5151
],
5252
aggregates: [`sum(${SpanFields.GEN_AI_USAGE_TOTAL_TOKENS})`],
53-
columns: [SpanFields.GEN_AI_REQUEST_MODEL],
53+
columns: [SpanFields.GEN_AI_RESPONSE_MODEL],
5454
fieldAliases: [t('Model'), t('Total Tokens')],
5555
orderby: `-sum(${SpanFields.GEN_AI_USAGE_TOTAL_TOKENS})`,
5656
},
@@ -108,7 +108,7 @@ const MODELS_TABLE = {
108108
name: '',
109109
conditions: AI_GENERATIONS_FILTER,
110110
fields: [
111-
SpanFields.GEN_AI_REQUEST_MODEL,
111+
SpanFields.GEN_AI_RESPONSE_MODEL,
112112
'count()',
113113
'equation|count_if(span.status,equals,internal_error)',
114114
`avg(${SpanFields.SPAN_DURATION})`,
@@ -130,7 +130,7 @@ const MODELS_TABLE = {
130130
`sum(${SpanFields.GEN_AI_USAGE_OUTPUT_TOKENS})`,
131131
`sum(${SpanFields.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING})`,
132132
],
133-
columns: [SpanFields.GEN_AI_REQUEST_MODEL],
133+
columns: [SpanFields.GEN_AI_RESPONSE_MODEL],
134134
fieldAliases: [
135135
t('Model'),
136136
t('Requests'),
@@ -164,8 +164,8 @@ export const AI_AGENTS_MODELS_PREBUILT_CONFIG: PrebuiltDashboard = {
164164
{
165165
dataset: WidgetType.SPANS,
166166
tag: {
167-
key: 'gen_ai.request.model',
168-
name: 'gen_ai.request.model',
167+
key: 'gen_ai.response.model',
168+
name: 'gen_ai.response.model',
169169
kind: FieldKind.TAG,
170170
},
171171
value: '',

static/app/views/insights/pages/agents/components/llmCallsWidget.tsx

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ export function LLMCallsWidget() {
4242

4343
const generationsRequest = useSpans(
4444
{
45-
fields: ['gen_ai.request.model', 'count()'],
45+
fields: ['gen_ai.response.model', 'count()'],
4646
sorts: [{field: 'count()', kind: 'desc'}],
4747
search: fullQuery,
4848
limit: 3,
@@ -54,7 +54,7 @@ export function LLMCallsWidget() {
5454
{
5555
...pageFilterChartParams,
5656
query: fullQuery,
57-
groupBy: [SpanFields.GEN_AI_REQUEST_MODEL],
57+
groupBy: [SpanFields.GEN_AI_RESPONSE_MODEL],
5858
yAxis: ['count(span.duration)'],
5959
sort: {field: 'count(span.duration)', kind: 'desc'},
6060
topEvents: 3,
@@ -109,7 +109,7 @@ export function LLMCallsWidget() {
109109
const footer = hasData && (
110110
<WidgetFooterTable>
111111
{models?.map((item, index) => {
112-
const modelId = item['gen_ai.request.model'];
112+
const modelId = item['gen_ai.response.model'];
113113
return (
114114
<Fragment key={modelId}>
115115
<div>
@@ -149,7 +149,7 @@ export function LLMCallsWidget() {
149149
yAxes: ['count(span.duration)'],
150150
},
151151
],
152-
groupBy: ['gen_ai.request.model'],
152+
groupBy: ['gen_ai.response.model'],
153153
query: fullQuery,
154154
sort: `-count(span.duration)`,
155155
interval: pageFilterChartParams.interval,

static/app/views/insights/pages/agents/components/modelCostWidget.tsx

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ export function ModelCostWidget() {
4141

4242
const tokensRequest = useSpans(
4343
{
44-
fields: ['gen_ai.request.model', 'sum(gen_ai.cost.total_tokens)'],
44+
fields: ['gen_ai.response.model', 'sum(gen_ai.cost.total_tokens)'],
4545
sorts: [{field: 'sum(gen_ai.cost.total_tokens)', kind: 'desc'}],
4646
search: fullQuery,
4747
limit: 3,
@@ -53,7 +53,7 @@ export function ModelCostWidget() {
5353
{
5454
...pageFilterChartParams,
5555
query: fullQuery,
56-
groupBy: [SpanFields.GEN_AI_REQUEST_MODEL],
56+
groupBy: [SpanFields.GEN_AI_RESPONSE_MODEL],
5757
yAxis: ['sum(gen_ai.cost.total_tokens)'],
5858
sort: {field: 'sum(gen_ai.cost.total_tokens)', kind: 'desc'},
5959
topEvents: 3,
@@ -111,7 +111,7 @@ export function ModelCostWidget() {
111111
const footer = hasData && (
112112
<WidgetFooterTable>
113113
{tokens?.map((item, index) => {
114-
const modelId = `${item['gen_ai.request.model']}`;
114+
const modelId = `${item['gen_ai.response.model']}`;
115115
return (
116116
<Fragment key={modelId}>
117117
<div>
@@ -149,7 +149,7 @@ export function ModelCostWidget() {
149149
yAxes: ['sum(gen_ai.cost.total_tokens)'],
150150
},
151151
],
152-
groupBy: ['gen_ai.request.model'],
152+
groupBy: ['gen_ai.response.model'],
153153
query: fullQuery,
154154
sort: `-sum(gen_ai.cost.total_tokens)`,
155155
interval: pageFilterChartParams.interval,

static/app/views/insights/pages/agents/components/modelsTable.tsx

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ export function ModelsTable() {
100100
const modelsRequest = useSpans(
101101
{
102102
fields: [
103-
'gen_ai.request.model',
103+
'gen_ai.response.model',
104104
'sum(gen_ai.usage.input_tokens)',
105105
'sum(gen_ai.usage.output_tokens)',
106106
'sum(gen_ai.usage.output_tokens.reasoning)',
@@ -126,7 +126,7 @@ export function ModelsTable() {
126126
}
127127

128128
return modelsRequest.data.map(span => ({
129-
model: span['gen_ai.request.model'],
129+
model: span['gen_ai.response.model'],
130130
requests: span['count()'] ?? 0,
131131
avg: span['avg(span.duration)'] ?? 0,
132132
p95: span['p95(span.duration)'] ?? 0,
@@ -223,9 +223,9 @@ const BodyCell = memo(function BodyCell({
223223
yAxes: ['avg(span.duration)'],
224224
},
225225
],
226-
query: `gen_ai.request.model:${dataRow.model}`,
226+
query: `gen_ai.response.model:${dataRow.model}`,
227227
field: [
228-
'gen_ai.request.model',
228+
'gen_ai.response.model',
229229
'gen_ai.operation.name',
230230
'gen_ai.usage.input_tokens',
231231
'gen_ai.usage.output_tokens',
@@ -268,7 +268,7 @@ const BodyCell = memo(function BodyCell({
268268
<ErrorCell
269269
value={dataRow.errors}
270270
target={getExploreUrl({
271-
query: `${query} span.status:internal_error gen_ai.request.model:"${dataRow.model}"`,
271+
query: `${query} span.status:internal_error gen_ai.response.model:"${dataRow.model}"`,
272272
organization,
273273
selection,
274274
referrer: Referrer.MODELS_TABLE,

static/app/views/insights/pages/agents/components/tokenUsageWidget.tsx

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ export function TokenUsageWidget() {
4242

4343
const tokensRequest = useSpans(
4444
{
45-
fields: ['gen_ai.request.model', 'sum(gen_ai.usage.total_tokens)'],
45+
fields: ['gen_ai.response.model', 'sum(gen_ai.usage.total_tokens)'],
4646
sorts: [{field: 'sum(gen_ai.usage.total_tokens)', kind: 'desc'}],
4747
search: fullQuery,
4848
limit: 3,
@@ -54,7 +54,7 @@ export function TokenUsageWidget() {
5454
{
5555
...pageFilterChartParams,
5656
query: fullQuery,
57-
groupBy: [SpanFields.GEN_AI_REQUEST_MODEL],
57+
groupBy: [SpanFields.GEN_AI_RESPONSE_MODEL],
5858
yAxis: ['sum(gen_ai.usage.total_tokens)'],
5959
sort: {field: 'sum(gen_ai.usage.total_tokens)', kind: 'desc'},
6060
topEvents: 3,
@@ -112,7 +112,7 @@ export function TokenUsageWidget() {
112112
const footer = hasData && (
113113
<WidgetFooterTable>
114114
{tokens?.map((item, index) => {
115-
const modelId = `${item['gen_ai.request.model']}`;
115+
const modelId = `${item['gen_ai.response.model']}`;
116116
return (
117117
<Fragment key={modelId}>
118118
<div>
@@ -152,7 +152,7 @@ export function TokenUsageWidget() {
152152
yAxes: ['sum(gen_ai.usage.total_tokens)'],
153153
},
154154
],
155-
groupBy: ['gen_ai.request.model'],
155+
groupBy: ['gen_ai.response.model'],
156156
query: fullQuery,
157157
sort: `-sum(gen_ai.usage.total_tokens)`,
158158
interval: pageFilterChartParams.interval,

0 commit comments

Comments (0)