
Commit 1293da3

♻️ fix test
1 parent 45569f3


2 files changed: +67 -8 lines


src/sentry/replays/usecases/summarize.py

Lines changed: 11 additions & 2 deletions
@@ -247,31 +247,40 @@ def get_summary_logs(
     error_events: list[EventDict],
     project_id: int,
     is_mobile_replay: bool = False,
+    replay_start: str | None = None,
 ) -> list[str]:
     # Sort error events by timestamp. This list includes all feedback events still.
     error_events.sort(key=lambda x: x["timestamp"])
-    return list(generate_summary_logs(segment_data, error_events, project_id, is_mobile_replay))
+    return list(
+        generate_summary_logs(
+            segment_data, error_events, project_id, is_mobile_replay, replay_start
+        )
+    )
 
 
 def generate_summary_logs(
     segment_data: Iterator[tuple[int, memoryview]],
     error_events: list[EventDict],
     project_id,
     is_mobile_replay: bool = False,
+    replay_start: str | None = None,
 ) -> Generator[str]:
     """
     Generate log messages from events and errors in chronological order.
     Avoid processing duplicate feedback events.
     """
     error_idx = 0
     seen_feedback_ids = {error["id"] for error in error_events if error["category"] == "feedback"}
+    replay_start_ms = _parse_iso_timestamp_to_ms(replay_start) if replay_start else 0.0
 
     # Process segments
     for _, segment in segment_data:
         events = json.loads(segment.tobytes().decode("utf-8"))
         for event in events:
             event_type = which(event)
             timestamp = get_replay_event_timestamp_ms(event, event_type)
+            if timestamp < replay_start_ms:
+                continue
 
             # Check if we need to yield any error messages that occurred before this event
             while (
@@ -582,5 +591,5 @@ def rpc_get_replay_summary_logs(
     segment_data = iter_segment_data(segment_md)
 
     # Combine replay and error data and parse into logs.
-    logs = get_summary_logs(segment_data, error_events, project.id, is_mobile_replay)
+    logs = get_summary_logs(segment_data, error_events, project.id, is_mobile_replay, replay_start)
     return {"logs": logs}
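
generate_summary_logs now drops any event whose timestamp falls before replay_start_ms, which it derives from _parse_iso_timestamp_to_ms. That helper is not part of this commit, so its exact behavior is assumed here; the sketch below shows one plausible shape, assuming replay_start arrives as an ISO-8601 string and that the module compares timestamps in epoch milliseconds (as get_replay_event_timestamp_ms suggests).

    from datetime import datetime, timezone

    def _parse_iso_timestamp_to_ms(value: str) -> float:
        # Hypothetical helper: convert an ISO-8601 timestamp to epoch milliseconds.
        # Naive datetimes are treated as UTC; on parse failure return 0.0 so that
        # no events get filtered out.
        try:
            dt = datetime.fromisoformat(value.replace("Z", "+00:00"))
        except ValueError:
            return 0.0
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)
        return dt.timestamp() * 1000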

tests/sentry/replays/usecases/test_summarize.py

Lines changed: 56 additions & 6 deletions
@@ -1057,18 +1057,21 @@ def save_recording_segment(
         FilestoreBlob().set(metadata, zlib.compress(data) if compressed else data)
 
     def test_rpc_simple(self) -> None:
+        now = datetime.now(UTC)
+        replay_start = now - timedelta(minutes=1)
+
         data = [
             {
                 "type": 5,
-                "timestamp": 0.0,
+                "timestamp": replay_start.timestamp() * 1000,
                 "data": {
                     "tag": "breadcrumb",
                     "payload": {"category": "console", "message": "hello"},
                 },
             },
             {
                 "type": 5,
-                "timestamp": 0.0,
+                "timestamp": replay_start.timestamp() * 1000,
                 "data": {
                     "tag": "breadcrumb",
                     "payload": {"category": "console", "message": "world"},
@@ -1077,15 +1080,18 @@ def test_rpc_simple(self) -> None:
         ]
         self.save_recording_segment(0, json.dumps(data).encode())
         self.save_recording_segment(1, json.dumps([]).encode())
-        self.store_replay()
+        self.store_replay(dt=replay_start)
 
         response = rpc_get_replay_summary_logs(
             self.project.id,
             self.replay_id,
             2,
         )
 
-        assert response == {"logs": ["Logged: 'hello' at 0.0", "Logged: 'world' at 0.0"]}
+        timestamp_ms = replay_start.timestamp() * 1000
+        assert response == {
+            "logs": [f"Logged: 'hello' at {timestamp_ms}", f"Logged: 'world' at {timestamp_ms}"]
+        }
 
     def test_rpc_with_both_direct_and_trace_connected_errors(self) -> None:
         """Test handling of breadcrumbs with both direct and trace connected errors. Error logs should not be duplicated."""
@@ -1212,7 +1218,7 @@ def test_rpc_with_feedback_breadcrumb(self) -> None:
         data = [
             {
                 "type": 5,
-                "timestamp": dt.timestamp(),
+                "timestamp": dt.timestamp() * 1000,
                 "data": {
                     "tag": "breadcrumb",
                     "payload": {
@@ -1421,7 +1427,8 @@ def test_rpc_with_trace_errors_duplicate_feedback(
                 id=feedback_event_id,
                 title="User Feedback",
                 message=feedback_data["contexts"]["feedback"]["message"],
-                timestamp=float(feedback_data["timestamp"]),
+                timestamp=float(feedback_data["timestamp"])
+                * 1000,  # EventDict timestamps are in milliseconds
                 category="feedback",
             )
 
@@ -1516,3 +1523,46 @@ def test_rpc_web_replay_navigation(self) -> None:
         logs = response["logs"]
         # Web replays should not include navigation events, so logs should be empty
         assert len(logs) == 0
+
+    def test_rpc_filters_out_events_before_replay_start(self) -> None:
+        """Test that events before the replay start are not logged."""
+        now = datetime.now(UTC)
+        replay_start = now - timedelta(minutes=1)
+
+        self.store_replay(dt=replay_start)
+
+        data = [
+            {
+                "type": 5,
+                "timestamp": float((replay_start - timedelta(minutes=2)).timestamp() * 1000),
+                "data": {
+                    "tag": "breadcrumb",
+                    "payload": {
+                        "category": "console",
+                        "message": "hello",
+                    },
+                },
+            },
+            {
+                "type": 5,
+                "timestamp": float((replay_start + timedelta(minutes=3)).timestamp() * 1000),
+                "data": {
+                    "tag": "breadcrumb",
+                    "payload": {
+                        "category": "console",
+                        "message": "world",
+                    },
+                },
+            },
+        ]
+        self.save_recording_segment(0, json.dumps(data).encode())
+
+        response = rpc_get_replay_summary_logs(
+            self.project.id,
+            self.replay_id,
+            1,
+        )
+
+        logs = response["logs"]
+        assert len(logs) == 1
+        assert "world" in logs[0]
