Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/sentry/event_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -2006,7 +2006,7 @@ def _get_severity_metadata_for_group(

Returns {} if conditions aren't met or on exception.
"""
from sentry.receivers.rules import PLATFORMS_WITH_PRIORITY_ALERTS
from sentry.workflow_engine.receivers.project_workflows import PLATFORMS_WITH_PRIORITY_ALERTS

if killswitch_matches_context(
"issues.severity.skip-seer-requests", {"project_id": event.project_id}
Expand Down
1 change: 0 additions & 1 deletion src/sentry/receivers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
from .owners import * # noqa: F401,F403
from .releases import * # noqa: F401,F403
from .rule_snooze import * # noqa: F401,F403
from .rules import * # noqa: F401,F403
from .sentry_apps import * # noqa: F401,F403
from .stats import * # noqa: F401,F403
from .superuser import * # noqa: F401,F403
Expand Down
116 changes: 116 additions & 0 deletions src/sentry/workflow_engine/defaults/workflows.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
from typing import Sequence

from django.db import router, transaction

from sentry.models.organization import Organization
from sentry.models.project import Project
from sentry.notifications.models.notificationaction import ActionTarget
from sentry.notifications.types import FallthroughChoiceType
from sentry.workflow_engine.defaults.detectors import _ensure_detector
from sentry.workflow_engine.models import (
Action,
DataCondition,
DataConditionGroup,
DataConditionGroupAction,
DetectorWorkflow,
Workflow,
WorkflowDataConditionGroup,
)
from sentry.workflow_engine.models.data_condition import Condition
from sentry.workflow_engine.typings.grouptype import IssueStreamGroupType

DEFAULT_WORKFLOW_LABEL = "Send a notification for high priority issues"


def connect_workflows_to_issue_stream(
    project: Project,
    workflows: list[Workflow],
) -> Sequence[DetectorWorkflow]:
    """Link each given workflow to the project's issue-stream detector.

    The detector is (re)ensured here because this code cannot know whether
    the signal that normally creates it has already been handled.

    Returns the DetectorWorkflow rows handed to bulk_create; pre-existing
    links are skipped via ignore_conflicts.
    """
    # NOTE(review): _ensure_detector may raise UnableToAcquireLockApiError;
    # when this runs under a send_robust() signal dispatch that exception is
    # swallowed silently — confirm whether explicit logging/capture is needed
    # here (see the pattern used in project_detectors.py).
    issue_stream_detector = _ensure_detector(project, IssueStreamGroupType.slug)

    links: list[DetectorWorkflow] = []
    for workflow in workflows:
        links.append(
            DetectorWorkflow(
                workflow=workflow,
                detector=issue_stream_detector,
            )
        )

    # ignore_conflicts keeps this idempotent if a link already exists.
    return DetectorWorkflow.objects.bulk_create(links, ignore_conflicts=True)


def create_priority_workflow(org: Organization) -> Workflow:
    """Return the org's default high-priority workflow, creating it if absent.

    The workflow triggers when an issue is created as, or escalates to, high
    priority, and emails the issue owners (falling through to active members).
    """
    # TODO: might be nice to provide a helper to create workflows in the future.
    # NOTE(review): filter-then-create is not race-safe under concurrent
    # project creation — confirm a unique constraint or lock covers this.
    found = Workflow.objects.filter(organization=org, name=DEFAULT_WORKFLOW_LABEL).first()
    if found is not None:
        return found

    with transaction.atomic(router.db_for_write(Workflow)):
        trigger_group = DataConditionGroup.objects.create(
            logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
            organization=org,
        )

        workflow = Workflow.objects.create(
            organization=org,
            name=DEFAULT_WORKFLOW_LABEL,
            when_condition_group=trigger_group,
            config={"frequency": 0},
        )

        # Trigger on both new and already-existing issues reaching high priority.
        DataCondition.objects.bulk_create(
            [
                DataCondition(
                    type=condition_type,
                    condition_group=workflow.when_condition_group,
                    comparison=True,
                    condition_result=True,
                )
                for condition_type in (
                    Condition.NEW_HIGH_PRIORITY_ISSUE,
                    Condition.EXISTING_HIGH_PRIORITY_ISSUE,
                )
            ]
        )

        # Action side: an (empty) filter group plus the email action itself.
        action_filter_group = DataConditionGroup.objects.create(
            logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
            organization=org,
        )

        email_action = Action.objects.create(
            type=Action.Type.EMAIL,
            config={
                "target_type": ActionTarget.ISSUE_OWNERS,
                "target_identifier": None,
            },
            data={
                "fallthrough_type": FallthroughChoiceType.ACTIVE_MEMBERS.value,
            },
        )

        DataConditionGroupAction.objects.create(
            action=email_action,
            condition_group=action_filter_group,
        )

        WorkflowDataConditionGroup.objects.create(
            workflow=workflow,
            condition_group=action_filter_group,
        )

    return workflow


def ensure_default_workflows(project: Project) -> list[Workflow]:
    """Ensure the organization's default workflows exist and are connected to
    *project*'s issue-stream detector.

    Currently the only default is the high-priority notification workflow.
    Returns the list of ensured workflows.
    """
    priority_workflow = create_priority_workflow(project.organization)
    default_workflows = [priority_workflow]
    connect_workflows_to_issue_stream(project, default_workflows)
    return default_workflows
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ def run(self) -> Workflow:

return workflow

def _create_detector_lookups(self) -> list[Detector | None]:
def _create_detector_lookups(self) -> list[Detector]:
if self.rule.source == RuleSource.CRON_MONITOR:
# Find the cron detector that was created before the rule
monitor_slug = None
Expand All @@ -87,7 +87,7 @@ def _create_detector_lookups(self) -> list[Detector | None]:
break

if not monitor_slug:
return [None]
return []

try:
with in_test_hide_transaction_boundary():
Expand All @@ -105,7 +105,7 @@ def _create_detector_lookups(self) -> list[Detector | None]:
except (Monitor.DoesNotExist, Detector.DoesNotExist):
pass

return [None]
return []

if self.is_dry_run:
error_detector = Detector.objects.filter(
Expand Down Expand Up @@ -135,13 +135,30 @@ def _create_detector_lookups(self) -> list[Detector | None]:
defaults={"config": {}, "name": ISSUE_STREAM_DETECTOR_NAME},
)

return [error_detector, issue_stream_detector]
# We are not returning the error_detector here to simplify
# _connect_default_detectors
return [issue_stream_detector]

def _connect_default_detectors(self, workflow: Workflow) -> None:
default_detectors = self._create_detector_lookups()
for detector in default_detectors:
if detector:
DetectorWorkflow.objects.get_or_create(detector=detector, workflow=workflow)

# do not add references to both issue stream and error group types
# it seems like other types might be relying on this as well,
# so this just says not to link the error groups.
# TODO - provide helpers to more easily create these classes
# and references in code, so we can remove the reliance on this code
references_to_create = [
DetectorWorkflow(
detector=detector,
workflow=workflow,
)
for detector in default_detectors
]

DetectorWorkflow.objects.bulk_create(
references_to_create,
ignore_conflicts=True,
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

curious; why?

Copy link
Copy Markdown
Contributor Author

@saponifi3d saponifi3d Apr 7, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Crons. Crons creates all their workflows / alerts through this layer, so there may be cases where they are already created / have the connection. if i don't ignore_conflicts there's an edge case that those cause 💥

)

def _bulk_create_data_conditions(
self,
Expand Down
1 change: 1 addition & 0 deletions src/sentry/workflow_engine/receivers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,6 @@
from .detector import * # NOQA
from .detector_workflow import * # NOQA
from .project_detectors import * # noqa: F401,F403
from .project_workflows import * # noqa: F401,F403
from .workflow import * # NOQA
from .workflow_data_condition_group import * # NOQA
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import logging
from typing import Any

from django.db import router, transaction

Expand All @@ -7,7 +8,8 @@
from sentry.notifications.types import FallthroughChoiceType
from sentry.signals import alert_rule_created, project_created
from sentry.users.services.user.model import RpcUser
from sentry.workflow_engine.migration_helpers.issue_alert_migration import IssueAlertMigrator
from sentry.workflow_engine.defaults.workflows import ensure_default_workflows
from sentry.workflow_engine.models import AlertRuleWorkflow

logger = logging.getLogger("sentry")

Expand All @@ -34,21 +36,38 @@
PLATFORMS_WITH_PRIORITY_ALERTS = ["python", "javascript"]


def create_default_rules(project: Project, default_rules=True, RuleModel=Rule, **kwargs):
def create_default_workflows(
project: Project,
default_rules: bool = True,
RuleModel: type[Rule] = Rule,
**kwargs: Any,
) -> None:
rule_data = DEFAULT_RULE_DATA

if not default_rules:
return

rule_data = DEFAULT_RULE_DATA

with transaction.atomic(router.db_for_write(RuleModel)):
rule = RuleModel.objects.create(project=project, label=DEFAULT_RULE_LABEL, data=rule_data)
workflows = ensure_default_workflows(project)

workflow = IssueAlertMigrator(rule).run()
logger.info(
"workflow_engine.default_issue_alert.migrated",
extra={"rule_id": rule.id, "workflow_id": workflow.id},
# TODO - we can remove the legacy code below once
# we launch the new UI (and stop referencing legacy models)
rule = RuleModel.objects.create(
project=project,
label=DEFAULT_RULE_LABEL,
data=rule_data,
)

legacy_references = [
AlertRuleWorkflow(
rule_id=rule.id,
workflow=workflow,
)
for workflow in workflows
]

AlertRuleWorkflow.objects.bulk_create(legacy_references)

try:
user: RpcUser = project.organization.get_default_owner()
except IndexError:
Expand All @@ -71,4 +90,8 @@ def create_default_rules(project: Project, default_rules=True, RuleModel=Rule, *
)


project_created.connect(create_default_rules, dispatch_uid="create_default_rules", weak=False)
project_created.connect(
create_default_workflows,
dispatch_uid="create_default_workflows",
weak=False,
)
6 changes: 4 additions & 2 deletions tests/sentry/integrations/slack/tasks/test_tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,10 @@
from sentry.integrations.slack.utils.channel import SlackChannelIdData
from sentry.integrations.slack.utils.rule_status import RedisRuleStatus
from sentry.models.rule import Rule
from sentry.receivers.rules import DEFAULT_RULE_LABEL
from sentry.testutils.cases import TestCase
from sentry.testutils.helpers import install_slack
from sentry.testutils.skips import requires_snuba
from sentry.workflow_engine.receivers.project_workflows import DEFAULT_RULE_LABEL
from tests.sentry.integrations.slack.utils.test_mock_slack_response import mock_slack_response

pytestmark = [requires_snuba]
Expand Down Expand Up @@ -192,7 +192,9 @@ def test_task_new_rule_with_owner(self, mock_set_value: MagicMock) -> None:
with self.tasks():
find_channel_id_for_rule(**data)

rule = Rule.objects.exclude(label__in=[DEFAULT_RULE_LABEL]).get(project_id=self.project.id)
rule = Rule.objects.exclude(label__in=[DEFAULT_RULE_LABEL]).get(
project_id=self.project.id,
)
mock_set_value.assert_called_with("success", rule.id)
assert rule.label == "New Rule with Owner"
assert rule.owner_team_id == team.id
Expand Down
2 changes: 1 addition & 1 deletion tests/sentry/receivers/test_featureadoption.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from sentry.models.rule import Rule
from sentry.plugins.bases.issue2 import IssueTrackingPlugin2
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.receivers.rules import DEFAULT_RULE_DATA
from sentry.signals import (
advanced_search,
alert_rule_created,
Expand All @@ -24,6 +23,7 @@
user_feedback_received,
)
from sentry.testutils.cases import SnubaTestCase, TestCase
from sentry.workflow_engine.receivers.project_workflows import DEFAULT_RULE_DATA


class FeatureAdoptionTest(TestCase, SnubaTestCase):
Expand Down
21 changes: 17 additions & 4 deletions tests/sentry/receivers/test_onboarding.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@
)
from sentry.models.project import Project
from sentry.models.rule import Rule
from sentry.receivers.rules import DEFAULT_RULE_LABEL
from sentry.signals import (
alert_rule_created,
event_processed,
Expand All @@ -59,6 +58,7 @@
from sentry.testutils.skips import requires_snuba
from sentry.utils.event import has_event_minified_stack_trace
from sentry.utils.samples import load_data
from sentry.workflow_engine.defaults.workflows import DEFAULT_WORKFLOW_LABEL
from sentry.workflow_engine.models import Workflow
from sentry.workflow_engine.models.detector import Detector
from sentry.workflow_engine.models.detector_workflow import DetectorWorkflow
Expand Down Expand Up @@ -168,11 +168,24 @@ def test_project_created__default_workflow(self) -> None:
project = self.create_project(fire_project_created=True)

assert Rule.objects.filter(project=project).exists()
workflow = Workflow.objects.get(organization=project.organization, name=DEFAULT_RULE_LABEL)
workflow = Workflow.objects.get(
organization=project.organization,
name=DEFAULT_WORKFLOW_LABEL,
)

assert Detector.objects.filter(project=project, type=ErrorGroupType.slug).count() == 1
assert Detector.objects.filter(project=project, type=IssueStreamGroupType.slug).count() == 1
assert DetectorWorkflow.objects.filter(workflow=workflow).count() == 2

issue_stream_detectors = Detector.objects.filter(
project=project,
type=IssueStreamGroupType.slug,
)

assert len(issue_stream_detectors) == 1

# Ensure we have exactly 1 connection to the issue stream; this triggers for both detectors above.
result_connections = DetectorWorkflow.objects.filter(workflow=workflow)
assert result_connections.count() == 1
assert result_connections[0].detector_id == issue_stream_detectors[0].id

@patch("sentry.analytics.record", wraps=record)
def test_project_created_with_origin(self, record_analytics: MagicMock) -> None:
Expand Down
Loading
Loading