diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py
index c4dcee28e2a75f..7b2057e5bd5cd1 100644
--- a/src/sentry/incidents/logic.py
+++ b/src/sentry/incidents/logic.py
@@ -6,6 +6,7 @@
 from copy import deepcopy
 from dataclasses import dataclass, replace
 from datetime import datetime, timedelta, timezone
+from re import Match
 from typing import Any, TypedDict
 from uuid import UUID, uuid4
 
@@ -24,6 +25,7 @@
 from sentry.db.models import Model
 from sentry.db.models.manager.base_query_set import BaseQuerySet
 from sentry.deletions.models.scheduleddeletion import CellScheduledDeletion
+from sentry.discover.arithmetic import is_equation, parse_arithmetic, strip_equation
 from sentry.incidents import tasks
 from sentry.incidents.events import IncidentCreatedEvent, IncidentStatusUpdatedEvent
 from sentry.incidents.models.alert_rule import (
@@ -1848,12 +1850,15 @@ def get_opsgenie_teams(organization_id: int, integration_id: int) -> list[tuple[
 
 
 def get_column_from_aggregate(
-    aggregate: str, allow_mri: bool, allow_eap: bool = False
+    aggregate: str,
+    allow_mri: bool,
+    allow_eap: bool = False,
+    match: Match[str] | None = None,
 ) -> str | None:
     # These functions exist as SnQLFunction definitions and are not supported in the older
     # logic for resolving functions. We parse these using `fields.is_function`, otherwise
     # they will fail using the old resolve_field logic.
-    match = is_function(aggregate)
+    match = is_function(aggregate) if match is None else match
     if match and (
         match.group("function") in SPANS_METRICS_FUNCTIONS
         or match.group("function") in METRICS_LAYER_UNSUPPORTED_TRANSACTION_METRICS_FUNCTIONS
@@ -1900,21 +1905,29 @@ def check_aggregate_column_support(
     aggregate: str, allow_mri: bool = False, allow_eap: bool = False
 ) -> bool:
     # TODO(ddm): remove `allow_mri` once the experimental feature flag is removed.
-    column = get_column_from_aggregate(aggregate, allow_mri, allow_eap)
-    match = is_function(aggregate)
-    function = match.group("function") if match else None
-    return (
-        column is None
-        or is_measurement(column)
-        or column in SUPPORTED_COLUMNS
-        or column in TRANSLATABLE_COLUMNS
-        or (is_mri(column) and allow_mri)
-        or (
-            isinstance(function, str)
-            and column in INSIGHTS_FUNCTION_VALID_ARGS_MAP.get(function, [])
-        )
-        or allow_eap
-    )
+    if is_equation(aggregate):
+        _, _, terms = parse_arithmetic(strip_equation(aggregate))
+    else:
+        terms = [aggregate]
+
+    for term in terms:
+        match = is_function(term)
+        column = get_column_from_aggregate(term, allow_mri, allow_eap, match)
+        function = match.group("function") if match else None
+        if not (
+            column is None
+            or is_measurement(column)
+            or column in SUPPORTED_COLUMNS
+            or column in TRANSLATABLE_COLUMNS
+            or (is_mri(column) and allow_mri)
+            or (
+                isinstance(function, str)
+                and column in INSIGHTS_FUNCTION_VALID_ARGS_MAP.get(function, [])
+            )
+            or allow_eap
+        ):
+            return False
+    return True
 
 
 def translate_aggregate_field(
diff --git a/src/sentry/search/eap/trace_metrics/validator.py b/src/sentry/search/eap/trace_metrics/validator.py
index c1fac22c3cf0e9..3c5fadf2f47804 100644
--- a/src/sentry/search/eap/trace_metrics/validator.py
+++ b/src/sentry/search/eap/trace_metrics/validator.py
@@ -3,6 +3,7 @@
 
 from rest_framework import serializers
 
+from sentry.discover.arithmetic import is_equation, parse_arithmetic, strip_equation
 from sentry.exceptions import InvalidSearchQuery
 from sentry.search.eap.resolver import SearchResolver
 from sentry.search.eap.trace_metrics.config import TraceMetricsSearchResolverConfig
@@ -58,14 +59,20 @@ def validate_trace_metrics_aggregate(aggregate: str) -> None:
     Raises:
         serializers.ValidationError: If the aggregate is invalid
     """
-    try:
-        trace_metric = extract_trace_metric_from_aggregate(aggregate)
-        if trace_metric is None:
-            raise InvalidSearchQuery(
-                f"Trace metrics aggregate {aggregate} must specify metric name, type, and unit"
+    if is_equation(aggregate):
+        _, _, terms = parse_arithmetic(strip_equation(aggregate))
+    else:
+        terms = [aggregate]
+
+    for term in terms:
+        try:
+            trace_metric = extract_trace_metric_from_aggregate(term)
+            if trace_metric is None:
+                raise InvalidSearchQuery(
+                    f"Trace metrics aggregate {term} must specify metric name, type, and unit"
+                )
+        except InvalidSearchQuery as e:
+            logger.exception("Invalid trace metrics aggregate: %s %s", term, e)
+            raise serializers.ValidationError(
+                {"aggregate": f"Invalid trace metrics aggregate: {term}"}
             )
-    except InvalidSearchQuery as e:
-        logger.exception("Invalid trace metrics aggregate: %s %s", aggregate, e)
-        raise serializers.ValidationError(
-            {"aggregate": f"Invalid trace metrics aggregate: {aggregate}"}
-        )
diff --git a/tests/sentry/workflow_engine/endpoints/test_organization_project_detector_index.py b/tests/sentry/workflow_engine/endpoints/test_organization_project_detector_index.py
index 02154abe82629a..c5b7817758002f 100644
--- a/tests/sentry/workflow_engine/endpoints/test_organization_project_detector_index.py
+++ b/tests/sentry/workflow_engine/endpoints/test_organization_project_detector_index.py
@@ -426,6 +426,58 @@ def test_use_transactions_instead_of_generic_metrics_dataset(self) -> None:
         assert query_sub.snuba_query.aggregate == "count()"
         assert query_sub.snuba_query.event_types == [SnubaQueryEventType.EventType.TRANSACTION]
 
+    @with_feature(
+        [
+            "organizations:tracemetrics-alerts",
+            "organizations:tracemetrics-enabled",
+        ]
+    )
+    def test_use_metrics_equation_as_aggregate(self) -> None:
+        data = {**self.valid_data}
+        equation = 'equation|count_if(`agent_name:"Agent Run"`,value,request_duration,distribution,none) * 2'
+        data["dataSources"] = [
+            {
+                "queryType": SnubaQuery.Type.PERFORMANCE.value,
+                "dataset": Dataset.EventsAnalyticsPlatform.value,
+                "query": "",
+                "aggregate": equation,
+                "timeWindow": 300,
+                "environment": self.environment.name,
+                "eventTypes": [SnubaQueryEventType.EventType.TRACE_ITEM_METRIC.name.lower()],
+            }
+        ]
+
+        with self.tasks():
+            response = self.get_success_response(
+                self.organization.slug,
+                self.project.slug,
+                **data,
+                status_code=201,
+            )
+
+        assert (
+            response.data["dataSources"][0]["queryObj"]["snubaQuery"]["dataset"]
+            == Dataset.EventsAnalyticsPlatform.value
+        )
+        assert response.data["dataSources"][0]["queryObj"]["snubaQuery"]["query"] == ""
+        assert response.data["dataSources"][0]["queryObj"]["snubaQuery"]["aggregate"] == equation
+
+        detector = Detector.objects.get(id=response.data["id"])
+        data_source = DataSource.objects.get(detector=detector)
+        assert data_source.type == data_source_type_registry.get_key(
+            QuerySubscriptionDataSourceHandler
+        )
+        assert data_source.organization_id == self.organization.id
+        query_sub = QuerySubscription.objects.get(id=int(data_source.source_id))
+        assert query_sub.project == self.project
+        assert query_sub.snuba_query.type == SnubaQuery.Type.PERFORMANCE.value
+        assert query_sub.snuba_query.dataset == Dataset.EventsAnalyticsPlatform.value
+        assert query_sub.snuba_query.query == ""
+        assert query_sub.snuba_query.aggregate == equation
+        assert query_sub.snuba_query.event_types == [
+            SnubaQueryEventType.EventType.TRACE_ITEM_METRIC
+        ]
+
 
 @cell_silo_test
 class OrganizationProjectDetectorIndexMonitorPostTest(APITestCase):