diff --git a/src/sentry/incidents/metric_issue_detector.py b/src/sentry/incidents/metric_issue_detector.py
index 1ee200557e86e0..2fd1b7082469ed 100644
--- a/src/sentry/incidents/metric_issue_detector.py
+++ b/src/sentry/incidents/metric_issue_detector.py
@@ -1,5 +1,5 @@
 from datetime import timedelta
-from typing import Any
+from typing import Any, cast
 
 from rest_framework import serializers
 
@@ -9,7 +9,10 @@
 from sentry.incidents.models.alert_rule import AlertRuleDetectionType
 from sentry.relay.config.metric_extraction import on_demand_metrics_feature_flags
 from sentry.seer.anomaly_detection.delete_rule import delete_data_in_seer_for_detector
-from sentry.seer.anomaly_detection.store_data_workflow_engine import send_new_detector_data
+from sentry.seer.anomaly_detection.store_data_workflow_engine import (
+    send_new_detector_data,
+    update_detector_data,
+)
 from sentry.snuba.dataset import Dataset
 from sentry.snuba.metrics.extraction import should_use_on_demand_metrics
 from sentry.snuba.models import (
@@ -271,6 +274,20 @@ def update_data_source(self, instance: Detector, data_source: SnubaQueryDataSour
                 "Invalid extrapolation mode for this detector type."
             )
 
+        # Handle a dynamic detector's snuba query changing
+        if instance.config.get("detection_type") == AlertRuleDetectionType.DYNAMIC:
+            if snuba_query.query != data_source.get(
+                "query"
+            ) or snuba_query.aggregate != data_source.get("aggregate"):
+                try:
+                    validated_data_source = cast(dict[str, Any], data_source)
+                    update_detector_data(instance, {"data_sources": [validated_data_source]})
+                except Exception:
+                    # don't update the snuba query if we failed to send data to Seer
+                    raise serializers.ValidationError(
+                        "Failed to send data to Seer, cannot update detector"
+                    )
+
         update_snuba_query(
             snuba_query=snuba_query,
             query_type=data_source.get("query_type", snuba_query.type),
@@ -287,6 +304,21 @@ def update_data_source(self, instance: Detector, data_source: SnubaQueryDataSour
         )
 
     def update(self, instance: Detector, validated_data: dict[str, Any]):
+        # Handle anomaly detection changes first in case we need to exit before saving
+        if (
+            instance.config.get("detection_type") != AlertRuleDetectionType.DYNAMIC
+            and validated_data.get("config", {}).get("detection_type")
+            == AlertRuleDetectionType.DYNAMIC
+        ):
+            # Detector has been changed to become a dynamic detector
+            try:
+                update_detector_data(instance, validated_data)
+            except Exception:
+                # Don't update if we failed to send data to Seer
+                raise serializers.ValidationError(
+                    "Failed to send data to Seer, cannot update detector"
+                )
+
         super().update(instance, validated_data)
 
         # Handle enable/disable query subscriptions
@@ -300,6 +332,7 @@ def update(self, instance: Detector, validated_data: dict[str, Any]):
             if query_subscriptions:
                 enable_disable_subscriptions(query_subscriptions, enabled)
 
+        # Handle data sources
         data_source: SnubaQueryDataSourceType | None = None
 
         if "data_sources" in validated_data:
@@ -325,10 +358,9 @@ def create(self, validated_data: dict[str, Any]):
         try:
             send_new_detector_data(detector)
         except Exception:
-            # Sending historical data failed; Detector won't be save, but we
+            # Sending historical data failed; Detector won't be saved, but we
             # need to clean up database state that has already been created.
             detector.workflow_condition_group.delete()
-            raise
 
         schedule_update_project_config(detector)
diff --git a/src/sentry/seer/anomaly_detection/store_data_workflow_engine.py b/src/sentry/seer/anomaly_detection/store_data_workflow_engine.py
index 9ef83411619d61..eb5d08d4c79462 100644
--- a/src/sentry/seer/anomaly_detection/store_data_workflow_engine.py
+++ b/src/sentry/seer/anomaly_detection/store_data_workflow_engine.py
@@ -1,4 +1,5 @@
 import logging
+from typing import Any
 
 import sentry_sdk
 from django.conf import settings
@@ -30,7 +31,7 @@
 from sentry.utils import json, metrics
 from sentry.utils.json import JSONDecodeError
 from sentry.workflow_engine.models import DataCondition, DataSource, DataSourceDetector, Detector
-from sentry.workflow_engine.types import DetectorException
+from sentry.workflow_engine.types import DetectorException, DetectorPriorityLevel
 
 logger = logging.getLogger(__name__)
 
@@ -40,42 +41,104 @@
 )
 
 
-def send_new_detector_data(detector: Detector) -> None:
-    """
-    Send historical data for a new Detector to Seer.
-    """
+def _fetch_related_models(
+    detector: Detector, method: str
+) -> tuple[DataSource, DataCondition, SnubaQuery]:
     # XXX: it is technically possible (though not used today) that a detector could have multiple data sources
     data_source_detector = DataSourceDetector.objects.filter(detector_id=detector.id).first()
     if not data_source_detector:
-        raise DetectorException("Could not create detector, data source not found.")
+        raise DetectorException(f"Could not {method} detector, data source not found.")
 
     data_source = data_source_detector.data_source
     try:
         query_subscription = QuerySubscription.objects.get(id=int(data_source.source_id))
     except QuerySubscription.DoesNotExist:
         raise DetectorException(
-            f"Could not create detector, query subscription {data_source.source_id} not found."
+            f"Could not {method} detector, query subscription {data_source.source_id} not found."
         )
     try:
         snuba_query = SnubaQuery.objects.get(id=query_subscription.snuba_query_id)
     except SnubaQuery.DoesNotExist:
         raise DetectorException(
-            f"Could not create detector, snuba query {query_subscription.snuba_query_id} not found."
+            f"Could not {method} detector, snuba query {query_subscription.snuba_query_id} not found."
         )
     try:
         data_condition = DataCondition.objects.get(
-            condition_group=detector.workflow_condition_group
+            condition_group=detector.workflow_condition_group,
+            condition_result__in=[
+                DetectorPriorityLevel.HIGH,
+                DetectorPriorityLevel.MEDIUM,
+            ],
         )
     except (DataCondition.DoesNotExist, DataCondition.MultipleObjectsReturned):
-        # there should only ever be one data condition for a dynamic metric detector, we dont actually expect a MultipleObjectsReturned
+        # there should only ever be one non-resolution data condition for a dynamic metric detector; we don't actually expect MultipleObjectsReturned
        dcg_id = (
            detector.workflow_condition_group.id
            if detector.workflow_condition_group is not None
            else None
        )
        raise DetectorException(
-            f"Could not create detector, data condition {dcg_id} not found or too many found."
+            f"Could not {method} detector, data condition {dcg_id} not found or too many found."
+        )
+    return data_source, data_condition, snuba_query
+
+
+def update_detector_data(
+    detector: Detector,
+    updated_fields: dict[str, Any],
+) -> None:
+    data_source, data_condition, snuba_query = _fetch_related_models(detector, "update")
+
+    # use setattr to avoid saving the models until the Seer call has successfully finished,
+    # otherwise they would be in a bad state
+    updated_data_condition_data = updated_fields.get("condition_group", {}).get("conditions")
+    if updated_data_condition_data:
+        for k, v in updated_data_condition_data[0].items():
+            setattr(data_condition, k, v)
+
+    event_types = snuba_query.event_types
+    updated_data_source_data = updated_fields.get("data_sources")
+    if updated_data_source_data:
+        data_source_data = updated_data_source_data[0]
+        event_types = data_source_data.get("event_types", snuba_query.event_types)
+
+        for k, v in data_source_data.items():
+            if k == "dataset":
+                v = v.value
+            elif k == "time_window":
+                time_window = data_source_data.get("time_window")
+                v = time_window if time_window is not None else snuba_query.time_window
+            elif k == "event_types":
+                continue
+            setattr(snuba_query, k, v)
+
+    try:
+        handle_send_historical_data_to_seer(
+            detector,
+            data_source,
+            data_condition,
+            snuba_query,
+            detector.project,
+            SeerMethod.UPDATE,
+            event_types,
         )
+    except TimeoutError:
+        raise ValidationError("Timed out sending data to Seer, unable to update detector")
+    except MaxRetryError:
+        raise ValidationError("Hit max retries sending data to Seer, unable to update detector")
+    except ParseError:
+        raise ValidationError("Couldn't parse response from Seer, unable to update detector")
+    except ValidationError:
+        raise ValidationError("Hit validation error, unable to update detector")
+    metrics.incr("anomaly_detection_monitor.updated")
+
+
+def send_new_detector_data(detector: Detector) -> None:
+    """
+    Send historical data for a new Detector to Seer.
+ """ + data_source, data_condition, snuba_query = _fetch_related_models(detector, "create") + try: handle_send_historical_data_to_seer( detector, data_source, data_condition, snuba_query, detector.project, SeerMethod.CREATE diff --git a/tests/sentry/incidents/endpoints/validators/test_validators.py b/tests/sentry/incidents/endpoints/validators/test_validators.py index 82dd645e6b45cd..f323594d7ab929 100644 --- a/tests/sentry/incidents/endpoints/validators/test_validators.py +++ b/tests/sentry/incidents/endpoints/validators/test_validators.py @@ -2,6 +2,7 @@ import orjson import pytest +from django.utils import timezone from rest_framework.exceptions import ErrorDetail, ValidationError from urllib3.exceptions import MaxRetryError, TimeoutError from urllib3.response import HTTPResponse @@ -152,15 +153,12 @@ def setUp(self) -> None: } ], "conditionGroup": { - "id": self.data_condition_group.id, - "organizationId": self.organization.id, "logicType": self.data_condition_group.logic_type, "conditions": [ { "type": Condition.GREATER, "comparison": 100, "conditionResult": DetectorPriorityLevel.HIGH, - "conditionGroupId": self.data_condition_group.id, }, { "type": Condition.LESS_OR_EQUAL, @@ -178,8 +176,6 @@ def setUp(self) -> None: self.valid_anomaly_detection_data = { **self.valid_data, "conditionGroup": { - "id": self.data_condition_group.id, - "organizationId": self.organization.id, "logicType": self.data_condition_group.logic_type, "conditions": [ { @@ -190,7 +186,6 @@ def setUp(self) -> None: "threshold_type": AnomalyDetectionThresholdType.ABOVE_AND_BELOW, }, "conditionResult": DetectorPriorityLevel.HIGH, - "conditionGroupId": self.data_condition_group.id, }, ], }, @@ -200,6 +195,66 @@ def setUp(self) -> None: }, } + def create_static_detector(self) -> Detector: + validator = MetricIssueDetectorValidator( + data=self.valid_data, + context=self.context, + ) + assert validator.is_valid(), validator.errors + + with self.tasks(): + static_detector = validator.save() + + # Verify detector in DB + self.assert_validated(static_detector) + + # Verify condition group in DB + condition_group = DataConditionGroup.objects.get( + id=static_detector.workflow_condition_group_id + ) + assert condition_group.logic_type == DataConditionGroup.Type.ANY + assert condition_group.organization_id == self.project.organization_id + + # Verify conditions in DB + conditions = list(DataCondition.objects.filter(condition_group=condition_group)) + assert len(conditions) == 2 + condition = conditions[0] + assert condition.type == Condition.GREATER + assert condition.comparison == 100 + assert condition.condition_result == DetectorPriorityLevel.HIGH + + return static_detector + + def create_dynamic_detector(self) -> Detector: + validator = MetricIssueDetectorValidator( + data=self.valid_anomaly_detection_data, + context=self.context, + ) + assert validator.is_valid(), validator.errors + + with self.tasks(): + detector = validator.save() + + # Verify condition group in DB + condition_group = DataConditionGroup.objects.get(id=detector.workflow_condition_group_id) + assert condition_group.logic_type == DataConditionGroup.Type.ANY + assert condition_group.organization_id == self.project.organization_id + + # Verify conditions in DB + conditions = list(DataCondition.objects.filter(condition_group=condition_group)) + assert len(conditions) == 1 + + condition = conditions[0] + assert condition.type == Condition.ANOMALY_DETECTION + assert condition.comparison == { + "sensitivity": AnomalyDetectionSensitivity.HIGH, + "seasonality": 
AnomalyDetectionSeasonality.AUTO, + "threshold_type": AnomalyDetectionThresholdType.ABOVE_AND_BELOW, + } + assert condition.condition_result == DetectorPriorityLevel.HIGH + + return detector + def assert_validated(self, detector): detector = Detector.objects.get(id=detector.id) assert detector.name == "Test Detector" @@ -228,34 +283,15 @@ def assert_validated(self, detector): assert snuba_query.environment == self.environment assert snuba_query.event_types == [SnubaQueryEventType.EventType.ERROR] + +class TestMetricAlertsCreateDetectorValidator(TestMetricAlertsDetectorValidator): + @mock.patch("sentry.incidents.metric_issue_detector.schedule_update_project_config") @mock.patch("sentry.workflow_engine.endpoints.validators.base.detector.create_audit_entry") def test_create_with_valid_data( self, mock_audit: mock.MagicMock, mock_schedule_update_project_config ) -> None: - validator = MetricIssueDetectorValidator( - data=self.valid_data, - context=self.context, - ) - assert validator.is_valid(), validator.errors - - with self.tasks(): - detector = validator.save() - - # Verify detector in DB - self.assert_validated(detector) - # Verify condition group in DB - condition_group = DataConditionGroup.objects.get(id=detector.workflow_condition_group_id) - assert condition_group.logic_type == DataConditionGroup.Type.ANY - assert condition_group.organization_id == self.project.organization_id - - # Verify conditions in DB - conditions = list(DataCondition.objects.filter(condition_group=condition_group)) - assert len(conditions) == 2 - condition = conditions[0] - assert condition.type == Condition.GREATER - assert condition.comparison == 100 - assert condition.condition_result == DetectorPriorityLevel.HIGH + detector = self.create_static_detector() # Verify audit log mock_audit.assert_called_once_with( @@ -277,38 +313,13 @@ def test_anomaly_detection( seer_return_value: StoreDataResponse = {"success": True} mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) - validator = MetricIssueDetectorValidator( - data=self.valid_anomaly_detection_data, - context=self.context, - ) - assert validator.is_valid(), validator.errors - - with self.tasks(): - detector = validator.save() + detector = self.create_dynamic_detector() # Verify detector in DB self.assert_validated(detector) assert mock_seer_request.call_count == 1 - # Verify condition group in DB - condition_group = DataConditionGroup.objects.get(id=detector.workflow_condition_group_id) - assert condition_group.logic_type == DataConditionGroup.Type.ANY - assert condition_group.organization_id == self.project.organization_id - - # Verify conditions in DB - conditions = list(DataCondition.objects.filter(condition_group=condition_group)) - assert len(conditions) == 1 - - condition = conditions[0] - assert condition.type == Condition.ANOMALY_DETECTION - assert condition.comparison == { - "sensitivity": AnomalyDetectionSensitivity.HIGH, - "seasonality": AnomalyDetectionSeasonality.AUTO, - "threshold_type": AnomalyDetectionThresholdType.ABOVE_AND_BELOW, - } - assert condition.condition_result == DetectorPriorityLevel.HIGH - # Verify audit log mock_audit.assert_called_once_with( request=self.context["request"], @@ -591,6 +602,305 @@ def test_transaction_dataset_deprecation_multiple_data_sources(self) -> None: ): validator.save() + +class TestMetricAlertsUpdateDetectorValidator(TestMetricAlertsDetectorValidator): + def test_update_with_valid_data(self) -> None: + """ + Test a simple update + """ + detector = 
self.create_static_detector() + + # the front end passes _all_ of the data, not just what changed + new_name = "Testing My Cool Detector" + update_data = { + **self.valid_data, + "id": detector.id, + "projectId": self.project.id, + "dateCreated": detector.date_added, + "dateUpdated": timezone.now(), + "conditionGroup": { + "logicType": self.data_condition_group.logic_type, + "conditions": [ + { + "type": Condition.GREATER, + "comparison": 100, + "conditionResult": DetectorPriorityLevel.HIGH, + }, + { + "type": Condition.LESS_OR_EQUAL, + "comparison": 100, + "conditionResult": DetectorPriorityLevel.OK, + "conditionGroupId": self.data_condition_group.id, + }, + ], + }, + "name": new_name, # change the name + } + update_validator = MetricIssueDetectorValidator( + instance=detector, data=update_data, context=self.context, partial=True + ) + assert update_validator.is_valid(), update_validator.errors + updated_detector = update_validator.save() + assert updated_detector.name == new_name + + @mock.patch( + "sentry.seer.anomaly_detection.store_data_workflow_engine.seer_anomaly_detection_connection_pool.urlopen" + ) + @mock.patch("sentry.workflow_engine.endpoints.validators.base.detector.create_audit_entry") + def test_update_anomaly_detection_from_static( + self, mock_audit: mock.MagicMock, mock_seer_request: mock.MagicMock + ) -> None: + """ + Test that if a static detector is changed to become a dynamic one + we send the historical data to Seer for that detector + """ + static_detector = self.create_static_detector() + + mock_audit.assert_called() + mock_audit.reset_mock() + + # Change to become a dynamic detector + seer_return_value: StoreDataResponse = {"success": True} + mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) + + update_validator = MetricIssueDetectorValidator( + instance=static_detector, + data=self.valid_anomaly_detection_data, + context=self.context, + partial=True, + ) + assert update_validator.is_valid(), update_validator.errors + dynamic_detector = update_validator.save() + + assert mock_seer_request.call_count == 1 + + # Verify detector in DB + self.assert_validated(dynamic_detector) + + # Verify condition group in DB + condition_group = DataConditionGroup.objects.get( + id=dynamic_detector.workflow_condition_group_id + ) + assert condition_group.logic_type == DataConditionGroup.Type.ANY + assert condition_group.organization_id == self.project.organization_id + + # Verify conditions in DB + conditions = list(DataCondition.objects.filter(condition_group=condition_group)) + assert len(conditions) == 1 + + condition = conditions[0] + assert condition.type == Condition.ANOMALY_DETECTION + assert condition.comparison == { + "sensitivity": AnomalyDetectionSensitivity.HIGH, + "seasonality": AnomalyDetectionSeasonality.AUTO, + "threshold_type": AnomalyDetectionThresholdType.ABOVE_AND_BELOW, + } + assert condition.condition_result == DetectorPriorityLevel.HIGH + + mock_audit.assert_called_once_with( + request=self.context["request"], + organization=self.project.organization, + target_object=dynamic_detector.id, + event=audit_log.get_event_id("DETECTOR_EDIT"), + data=dynamic_detector.get_audit_log_data(), + ) + + @mock.patch( + "sentry.seer.anomaly_detection.store_data_workflow_engine.seer_anomaly_detection_connection_pool.urlopen" + ) + @mock.patch("sentry.workflow_engine.endpoints.validators.base.detector.create_audit_entry") + def test_update_anomaly_detection_snuba_query( + self, mock_audit: mock.MagicMock, mock_seer_request: 
mock.MagicMock + ) -> None: + """ + Test that when we update the snuba query for a dynamic detector + we make a call to Seer with the changes + """ + seer_return_value: StoreDataResponse = {"success": True} + mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) + + detector = self.create_dynamic_detector() + + # Verify detector in DB + self.assert_validated(detector) + + assert mock_seer_request.call_count == 1 + mock_seer_request.reset_mock() + + # Verify audit log + mock_audit.assert_called_once_with( + request=self.context["request"], + organization=self.project.organization, + target_object=detector.id, + event=audit_log.get_event_id("DETECTOR_ADD"), + data=detector.get_audit_log_data(), + ) + mock_audit.reset_mock() + + # Change the snuba query which should call Seer + updated_query = "different query" + update_data = { + **self.valid_anomaly_detection_data, + "dataSources": [ + { + "queryType": SnubaQuery.Type.ERROR.value, + "dataset": Dataset.Events.value, + "query": updated_query, # this is what's changing + "aggregate": "count()", + "timeWindow": 3600, + "environment": self.environment.name, + "eventTypes": [SnubaQueryEventType.EventType.ERROR.name.lower()], + } + ], + } + update_validator = MetricIssueDetectorValidator( + instance=detector, data=update_data, context=self.context, partial=True + ) + assert update_validator.is_valid(), update_validator.errors + dynamic_detector = update_validator.save() + + assert mock_seer_request.call_count == 1 + mock_seer_request.reset_mock() + + # Verify snuba query changes + data_source = DataSource.objects.get(detector=dynamic_detector) + query_subscription = QuerySubscription.objects.get(id=data_source.source_id) + snuba_query = SnubaQuery.objects.get(id=query_subscription.snuba_query_id) + assert snuba_query.query == updated_query + + mock_audit.assert_called_once_with( + request=self.context["request"], + organization=self.project.organization, + target_object=dynamic_detector.id, + event=audit_log.get_event_id("DETECTOR_EDIT"), + data=dynamic_detector.get_audit_log_data(), + ) + mock_audit.reset_mock() + + # Change the aggregate which should call Seer + updated_aggregate = "count_unique(user)" + update_data = { + **self.valid_anomaly_detection_data, + "dataSources": [ + { + "queryType": SnubaQuery.Type.ERROR.value, + "dataset": Dataset.Events.value, + "query": "updated_query", + "aggregate": updated_aggregate, # this is what's changing + "timeWindow": 3600, + "environment": self.environment.name, + "eventTypes": [SnubaQueryEventType.EventType.ERROR.name.lower()], + } + ], + } + update_validator = MetricIssueDetectorValidator( + instance=detector, data=update_data, context=self.context, partial=True + ) + assert update_validator.is_valid(), update_validator.errors + dynamic_detector = update_validator.save() + + assert mock_seer_request.call_count == 1 + + # Verify snuba query changes + data_source = DataSource.objects.get(detector=dynamic_detector) + query_subscription = QuerySubscription.objects.get(id=data_source.source_id) + snuba_query = SnubaQuery.objects.get(id=query_subscription.snuba_query_id) + assert snuba_query.aggregate == "count_unique(tags[sentry:user])" + + mock_audit.assert_called_once_with( + request=self.context["request"], + organization=self.project.organization, + target_object=dynamic_detector.id, + event=audit_log.get_event_id("DETECTOR_EDIT"), + data=dynamic_detector.get_audit_log_data(), + ) + + @mock.patch( + 
"sentry.seer.anomaly_detection.store_data_workflow_engine.seer_anomaly_detection_connection_pool.urlopen" + ) + def test_anomaly_detection__send_historical_data_update_fails( + self, mock_seer_request: mock.MagicMock + ) -> None: + """ + Test that if the call to Seer fails when we try to change a detector's type to dynamic from static that we do not update the detector or data condition + """ + static_detector = self.create_static_detector() + + # Attempt to convert detector to dynamic type + mock_seer_request.side_effect = TimeoutError + + update_validator = MetricIssueDetectorValidator( + instance=static_detector, + data=self.valid_anomaly_detection_data, + context=self.context, + partial=True, + ) + assert update_validator.is_valid(), update_validator.errors + + with self.tasks(), pytest.raises(ValidationError): + update_validator.save() + + # Re-fetch the models and ensure they're not updated + detector = Detector.objects.get(id=static_detector.id) + assert detector.config.get("detection_type") == AlertRuleDetectionType.STATIC.value + + condition_group = DataConditionGroup.objects.get(id=detector.workflow_condition_group_id) + assert condition_group.logic_type == DataConditionGroup.Type.ANY + assert condition_group.organization_id == self.project.organization_id + + conditions = list(DataCondition.objects.filter(condition_group=condition_group)) + assert len(conditions) == 2 + condition = conditions[0] + assert condition.type == Condition.GREATER + assert condition.comparison == 100 + assert condition.condition_result == DetectorPriorityLevel.HIGH + + @mock.patch( + "sentry.seer.anomaly_detection.store_data_workflow_engine.seer_anomaly_detection_connection_pool.urlopen" + ) + def test_anomaly_detection__send_historical_data_snuba_update_fails( + self, mock_seer_request: mock.MagicMock + ) -> None: + """ + Test that if the call to Seer fails when we try to change a dynamic detector's snuba query that we do not update the snuba query + """ + seer_return_value: StoreDataResponse = {"success": True} + mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) + + detector = self.create_dynamic_detector() + + # Attempt to change the snuba query's query + mock_seer_request.side_effect = TimeoutError + + updated_query = "different query" + update_data = { + **self.valid_anomaly_detection_data, + "dataSources": [ + { + "queryType": SnubaQuery.Type.ERROR.value, + "dataset": Dataset.Events.value, + "query": updated_query, # this is what's changing + "aggregate": "count()", + "timeWindow": 3600, + "environment": self.environment.name, + "eventTypes": [SnubaQueryEventType.EventType.ERROR.name.lower()], + } + ], + } + update_validator = MetricIssueDetectorValidator( + instance=detector, data=update_data, context=self.context, partial=True + ) + assert update_validator.is_valid(), update_validator.errors + + with self.tasks(), pytest.raises(ValidationError): + update_validator.save() + + # Fetch data and ensure it hasn't changed + data_source = DataSource.objects.get(detector=detector) + query_sub = QuerySubscription.objects.get(id=data_source.source_id) + snuba_query = query_sub.snuba_query + assert snuba_query.query == "test query" + @with_feature("organizations:discover-saved-queries-deprecation") def test_update_allowed_even_with_deprecated_dataset(self) -> None: # Updates should be allowed even when the feature flag is enabled diff --git a/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py 
b/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py index f01f1c1d21d151..a66669ac26d980 100644 --- a/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py +++ b/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py @@ -719,7 +719,7 @@ def test_update_config_valid(self) -> None: self.detector.save() # Update with valid new config - updated_config = {"detection_type": "dynamic", "comparison_delta": 3600} + updated_config = {"detection_type": "percent", "comparison_delta": 3600} data = { "config": updated_config, } @@ -737,7 +737,7 @@ def test_update_config_valid(self) -> None: assert self.detector.config == updated_config # API returns camelCase assert response.data["config"] == { - "detectionType": "dynamic", + "detectionType": "percent", "comparisonDelta": 3600, }
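For context on how the new update path in this diff is exercised, here is a minimal sketch, assuming an existing static metric detector and a payload shaped like `valid_anomaly_detection_data` in the tests above; `detector`, `payload`, and `context` are illustrative names, not part of this change:

    update_validator = MetricIssueDetectorValidator(
        instance=detector,  # existing static metric issue detector
        data=payload,       # config["detection_type"] becomes "dynamic"
        context=context,    # request/organization context, as in the tests
        partial=True,
    )
    assert update_validator.is_valid(), update_validator.errors

    # save() calls update(), which notices the static -> dynamic transition and
    # sends historical data to Seer via update_detector_data() *before*
    # super().update() persists anything. If the Seer call fails, a
    # ValidationError is raised and the detector, its data condition, and its
    # snuba query are all left unchanged.
    dynamic_detector = update_validator.save()

The same ordering applies when an already-dynamic detector's snuba query or aggregate changes: update_data_source() calls update_detector_data() first, and only runs update_snuba_query() once Seer has accepted the new data.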