Skip to content

Commit 8156cbe

Browse files
authored
tests(incidents): Add serializer parity test for incident activity (#113010)
When we identify issues with our API backport, I want to ensure we have delta tests to cover that situation so all known differences there are explicit. Also added an 'unreliable' option to assert_serializer_parity, as some fields (like activity.id) will sometimes be different and sometimes not, depending on what other tests have run. 'unreliable' allows us to differentiate those from known differences, and likely has some other legitimate uses.
1 parent 3136d52 commit 8156cbe

File tree

3 files changed

+180
-12
lines changed

3 files changed

+180
-12
lines changed

src/sentry/testutils/helpers/serializer_parity.py

Lines changed: 57 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ def assert_serializer_parity(
1010
old: Mapping[str, Any],
1111
new: Mapping[str, Any],
1212
known_differences: set[str] | None = None,
13+
unreliable: set[str] | None = None,
1314
) -> None:
1415
"""Assert that two serializer responses are equal, modulo known differences.
1516
@@ -23,6 +24,14 @@ def assert_serializer_parity(
2324
Raises if any listed known difference is not actually different — unnecessary
2425
entries should be removed.
2526
27+
``unreliable`` is a set of dot-separated field paths that may or may not
28+
differ depending on test ordering (e.g. auto-incremented IDs from different
29+
tables). These fields are silently skipped with no "must actually differ"
30+
check. The field must still exist in both responses.
31+
32+
Use sparingly — ``unreliable`` masks both real regressions and reliable
33+
consistency, so prefer ``known_differences`` when the field reliably differs.
34+
2635
Examples::
2736
2837
assert_serializer_parity(old=old, new=new)
@@ -32,10 +41,17 @@ def assert_serializer_parity(
3241
new=new,
3342
known_differences={"resolveThreshold", "triggers.resolveThreshold"},
3443
)
44+
45+
assert_serializer_parity(
46+
old=old,
47+
new=new,
48+
unreliable={"activities.id"},
49+
)
3550
"""
3651
known_diffs = frozenset(known_differences or ())
52+
unreliable_fields = frozenset(unreliable or ())
3753
checker = _ParityChecker()
38-
checker.compare(old, new, known_diffs)
54+
checker.compare(old, new, known_diffs, unreliable=unreliable_fields)
3955

4056
assert not checker.mismatches, "Serializer differences:\n" + "\n".join(checker.mismatches)
4157

@@ -58,23 +74,38 @@ class _ParityChecker:
5874
# known_diffs entries confirmed to be actual differences.
5975
confirmed: set[str] = field(default_factory=set)
6076

61-
def _nested_diffs(self, known_diffs: frozenset[str], key: str) -> frozenset[str]:
77+
def _nested_fields(self, field_set: frozenset[str], key: str) -> frozenset[str]:
78+
"""Extract child paths for *key* from a dot-separated field set.
79+
80+
E.g. ``_nested_fields({"activities.id", "title"}, "activities")``
81+
returns ``{"id"}``.
82+
"""
6283
prefix = key + "."
63-
return frozenset(e[len(prefix) :] for e in known_diffs if e.startswith(prefix))
84+
return frozenset(e[len(prefix) :] for e in field_set if e.startswith(prefix))
6485

6586
def compare(
6687
self,
6788
old: Mapping[str, Any],
6889
new: Mapping[str, Any],
6990
known_diffs: frozenset[str],
7091
path: str = "",
71-
kd_path: str = "",
92+
diffs_path: str = "",
93+
*,
94+
unreliable: frozenset[str] = frozenset(),
7295
) -> None:
7396
for key in set(list(old.keys()) + list(new.keys())):
7497
if key in known_diffs:
75-
full_kd_key = _qualify(kd_path, key)
98+
full_diffs_key = _qualify(diffs_path, key)
7699
if key not in new or key not in old or old[key] != new[key]:
77-
self.confirmed.add(full_kd_key)
100+
self.confirmed.add(full_diffs_key)
101+
continue
102+
103+
if key in unreliable:
104+
full_path = _qualify(path, key)
105+
if key not in new:
106+
self.mismatches.append(f"Missing from new: {full_path}")
107+
elif key not in old:
108+
self.mismatches.append(f"Extra in new: {full_path}")
78109
continue
79110

80111
full_path = _qualify(path, key)
@@ -88,10 +119,11 @@ def compare(
88119

89120
old_val = old[key]
90121
new_val = new[key]
91-
nested = self._nested_diffs(known_diffs, key)
122+
nested_diffs = self._nested_fields(known_diffs, key)
123+
nested_unreliable = self._nested_fields(unreliable, key)
92124

93-
if nested:
94-
child_kd_path = _qualify(kd_path, key)
125+
if nested_diffs or nested_unreliable:
126+
child_diffs_path = _qualify(diffs_path, key)
95127
if isinstance(old_val, list) and isinstance(new_val, list):
96128
if len(old_val) != len(new_val):
97129
self.mismatches.append(
@@ -100,13 +132,27 @@ def compare(
100132
for i, (old_item, new_item) in enumerate(zip(old_val, new_val)):
101133
item_path = f"{full_path}[{i}]"
102134
if isinstance(old_item, Mapping) and isinstance(new_item, Mapping):
103-
self.compare(old_item, new_item, nested, item_path, child_kd_path)
135+
self.compare(
136+
old_item,
137+
new_item,
138+
nested_diffs,
139+
item_path,
140+
child_diffs_path,
141+
unreliable=nested_unreliable,
142+
)
104143
elif old_item != new_item:
105144
self.mismatches.append(
106145
f"{item_path}: old={old_item!r}, new={new_item!r}"
107146
)
108147
elif isinstance(old_val, Mapping) and isinstance(new_val, Mapping):
109-
self.compare(old_val, new_val, nested, full_path, child_kd_path)
148+
self.compare(
149+
old_val,
150+
new_val,
151+
nested_diffs,
152+
full_path,
153+
child_diffs_path,
154+
unreliable=nested_unreliable,
155+
)
110156
elif old_val != new_val:
111157
self.mismatches.append(f"{full_path}: old={old_val!r}, new={new_val!r}")
112158
elif old_val != new_val:

tests/sentry/incidents/endpoints/test_organization_incident_index.py

Lines changed: 80 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,11 @@
88
from sentry.incidents.endpoints.serializers.utils import get_fake_id_from_object_id
99
from sentry.incidents.grouptype import MetricIssue
1010
from sentry.incidents.logic import update_incident_status
11-
from sentry.incidents.models.incident import IncidentStatus
11+
from sentry.incidents.models.incident import IncidentActivity, IncidentActivityType, IncidentStatus
1212
from sentry.incidents.utils.constants import INCIDENTS_SNUBA_SUBSCRIPTION_TYPE
1313
from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION
1414
from sentry.models.groupopenperiod import GroupOpenPeriod
15+
from sentry.models.groupopenperiodactivity import GroupOpenPeriodActivity, OpenPeriodActivityType
1516
from sentry.silo.base import SiloMode
1617
from sentry.snuba.dataset import Dataset
1718
from sentry.snuba.models import SnubaQuery, SnubaQueryEventType
@@ -284,6 +285,84 @@ def test_workflow_engine_serializer_matches_old_serializer(self) -> None:
284285
},
285286
)
286287

288+
@freeze_time("2024-12-11 03:21:34")
289+
def test_activity_serialization_parity(self) -> None:
290+
self.create_team(organization=self.organization, members=[self.user])
291+
self.login_as(self.user)
292+
293+
alert_rule = self.create_alert_rule()
294+
trigger = self.create_alert_rule_trigger(alert_rule=alert_rule)
295+
_, _, _, detector, _, _, _, _ = migrate_alert_rule(alert_rule)
296+
migrate_metric_data_conditions(trigger)
297+
migrate_resolve_threshold_data_condition(alert_rule)
298+
299+
incident = self.create_incident(alert_rule=alert_rule, status=IncidentStatus.CRITICAL.value)
300+
301+
with assume_test_silo_mode(SiloMode.CELL):
302+
group = self.create_group(type=MetricIssue.type_id, project=self.project)
303+
group.priority = PriorityLevel.HIGH.value
304+
group.save()
305+
DetectorGroup.objects.create(detector=detector, group=group)
306+
307+
group_open_period = GroupOpenPeriod.objects.get(group=group, project=self.project)
308+
group_open_period.update(date_started=incident.date_started)
309+
310+
IncidentGroupOpenPeriod.objects.create(
311+
group_open_period=group_open_period,
312+
incident_id=incident.id,
313+
incident_identifier=incident.identifier,
314+
)
315+
316+
# Legacy activity: STATUS_CHANGE to CRITICAL
317+
IncidentActivity.objects.create(
318+
incident=incident,
319+
type=IncidentActivityType.STATUS_CHANGE.value,
320+
value=str(IncidentStatus.CRITICAL.value),
321+
previous_value=None,
322+
)
323+
324+
# Matching WE activity: OPENED with HIGH priority (maps to CRITICAL)
325+
GroupOpenPeriodActivity.objects.create(
326+
group_open_period=group_open_period,
327+
type=OpenPeriodActivityType.OPENED,
328+
value=PriorityLevel.HIGH,
329+
date_added=incident.date_started,
330+
)
331+
332+
with self.feature(["organizations:incidents", "organizations:performance-view"]):
333+
old_resp = self.get_success_response(self.organization.slug, expand="activities")
334+
old_data = old_resp.data
335+
assert len(old_data) > 0
336+
337+
with self.feature(
338+
[
339+
"organizations:incidents",
340+
"organizations:performance-view",
341+
"organizations:workflow-engine-rule-serializers",
342+
]
343+
):
344+
new_resp = self.get_success_response(self.organization.slug, expand="activities")
345+
new_data = new_resp.data
346+
assert len(new_data) == len(old_data)
347+
348+
for old_incident_data, new_incident_data in zip(old_data, new_data):
349+
assert_serializer_parity(
350+
old=old_incident_data,
351+
new=new_incident_data,
352+
known_differences={
353+
"alertRule.resolveThreshold",
354+
"alertRule.triggers.resolveThreshold",
355+
"alertRule.triggers.label",
356+
"title",
357+
# Legacy activities include fields the WE path intentionally omits
358+
"activities.incidentIdentifier",
359+
"activities.user",
360+
"activities.comment",
361+
},
362+
# Different tables with independent auto-increment sequences
363+
unreliable={"activities.id"},
364+
)
365+
287366

288367
class WorkflowEngineIncidentListTest(APITestCase):
289368
endpoint = "sentry-api-0-organization-incident-index"
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
import pytest
2+
3+
from sentry.testutils.helpers.serializer_parity import assert_serializer_parity
4+
5+
6+
class TestUnreliable:
7+
def test_ignores_difference(self) -> None:
8+
old = {"id": "1", "name": "a"}
9+
new = {"id": "2", "name": "a"}
10+
assert_serializer_parity(old=old, new=new, unreliable={"id"})
11+
12+
def test_ignores_match(self) -> None:
13+
old = {"id": "1", "name": "a"}
14+
new = {"id": "1", "name": "a"}
15+
assert_serializer_parity(old=old, new=new, unreliable={"id"})
16+
17+
def test_nested_in_list(self) -> None:
18+
old = {"items": [{"id": "1", "v": "x"}, {"id": "2", "v": "y"}]}
19+
new = {"items": [{"id": "99", "v": "x"}, {"id": "100", "v": "y"}]}
20+
assert_serializer_parity(old=old, new=new, unreliable={"items.id"})
21+
22+
def test_nested_in_dict(self) -> None:
23+
old = {"outer": {"id": "1", "v": "x"}}
24+
new = {"outer": {"id": "2", "v": "x"}}
25+
assert_serializer_parity(old=old, new=new, unreliable={"outer.id"})
26+
27+
def test_still_fails_on_other_fields(self) -> None:
28+
old = {"id": "1", "name": "a"}
29+
new = {"id": "2", "name": "b"}
30+
with pytest.raises(AssertionError, match="name"):
31+
assert_serializer_parity(old=old, new=new, unreliable={"id"})
32+
33+
def test_fails_if_missing_from_new(self) -> None:
34+
old = {"id": "1", "name": "a"}
35+
new = {"name": "a"}
36+
with pytest.raises(AssertionError, match="Missing from new"):
37+
assert_serializer_parity(old=old, new=new, unreliable={"id"})
38+
39+
def test_fails_if_extra_in_new(self) -> None:
40+
old = {"name": "a"}
41+
new = {"id": "1", "name": "a"}
42+
with pytest.raises(AssertionError, match="Extra in new"):
43+
assert_serializer_parity(old=old, new=new, unreliable={"id"})

0 commit comments

Comments
 (0)