From 63b1f2457e6a24a163c27d8a9d04b378381887be Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 11 Jul 2025 10:49:35 +0200 Subject: [PATCH 001/102] ref(transport): Added shared sync/async transport superclass and created a sync transport HTTP subclass Moved shared sync/async logic into a new superclass (HttpTransportCore), and moved sync transport specific code into a new subclass(BaseSyncHttpTransport), from which the current transport implementations inherit Fixes GH-4568 --- sentry_sdk/client.py | 4 +- sentry_sdk/transport.py | 151 ++++++++++++++++++++++++---------------- 2 files changed, 92 insertions(+), 63 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 67c723c76c..9da4a40bf6 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -25,7 +25,7 @@ ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace -from sentry_sdk.transport import BaseHttpTransport, make_transport +from sentry_sdk.transport import HttpTransportCore, make_transport from sentry_sdk.consts import ( SPANDATA, DEFAULT_MAX_VALUE_LENGTH, @@ -403,7 +403,7 @@ def _capture_envelope(envelope: Envelope) -> None: self.monitor or self.log_batcher or has_profiling_enabled(self.options) - or isinstance(self.transport, BaseHttpTransport) + or isinstance(self.transport, HttpTransportCore) ): # If we have anything on that could spawn a background thread, we # need to check if it's safe to use them. 
diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index ac7a8c3522..3a0d4ec991 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -162,7 +162,7 @@ def _parse_rate_limits( continue -class BaseHttpTransport(Transport): +class HttpTransportCore(Transport): """The base HTTP transport.""" TIMEOUT = 30 # seconds @@ -286,12 +286,8 @@ def _update_rate_limits( seconds=retry_after ) - def _send_request( - self: Self, - body: bytes, - headers: Dict[str, str], - endpoint_type: EndpointType = EndpointType.ENVELOPE, - envelope: Optional[Envelope] = None, + def _handle_request_error( + self: Self, envelope: Optional[Envelope], loss_reason: str = "network" ) -> None: def record_loss(reason: str) -> None: if envelope is None: @@ -300,45 +296,45 @@ def record_loss(reason: str) -> None: for item in envelope.items: self.record_lost_event(reason, item=item) + self.on_dropped_event(loss_reason) + record_loss("network_error") + + def _handle_response( + self: Self, + response: Union[urllib3.BaseHTTPResponse, httpcore.Response], + envelope: Optional[Envelope], + ) -> None: + self._update_rate_limits(response) + + if response.status == 429: + # if we hit a 429. Something was rate limited but we already + # acted on this in `self._update_rate_limits`. Note that we + # do not want to record event loss here as we will have recorded + # an outcome in relay already. 
+ self.on_dropped_event("status_429") + pass + + elif response.status >= 300 or response.status < 200: + logger.error( + "Unexpected status code: %s (body: %s)", + response.status, + getattr(response, "data", getattr(response, "content", None)), + ) + self._handle_request_error( + envelope=envelope, loss_reason="status_{}".format(response.status) + ) + + def _update_headers( + self: Self, + headers: Dict[str, str], + ) -> None: + headers.update( { "User-Agent": str(self._auth.client), "X-Sentry-Auth": str(self._auth.to_header()), } ) - try: - response = self._request( - "POST", - endpoint_type, - body, - headers, - ) - except Exception: - self.on_dropped_event("network") - record_loss("network_error") - raise - - try: - self._update_rate_limits(response) - - if response.status == 429: - # if we hit a 429. Something was rate limited but we already - # acted on this in `self._update_rate_limits`. Note that we - # do not want to record event loss here as we will have recorded - # an outcome in relay already. 
- self.on_dropped_event("status_429") - pass - - elif response.status >= 300 or response.status < 200: - logger.error( - "Unexpected status code: %s (body: %s)", - response.status, - getattr(response, "data", getattr(response, "content", None)), - ) - self.on_dropped_event("status_{}".format(response.status)) - record_loss("network_error") - finally: - response.close() def on_dropped_event(self: Self, _reason: str) -> None: return None @@ -375,11 +371,6 @@ def _fetch_pending_client_report( type="client_report", ) - def _flush_client_reports(self: Self, force: bool = False) -> None: - client_report = self._fetch_pending_client_report(force=force, interval=60) - if client_report is not None: - self.capture_envelope(Envelope(items=[client_report])) - def _check_disabled(self: Self, category: EventDataCategory) -> bool: def _disabled(bucket: Optional[EventDataCategory]) -> bool: ts = self._disabled_until.get(bucket) @@ -398,9 +389,9 @@ def _is_worker_full(self: Self) -> bool: def is_healthy(self: Self) -> bool: return not (self._is_worker_full() or self._is_rate_limited()) - def _send_envelope(self: Self, envelope: Envelope) -> None: - - # remove all items from the envelope which are over quota + def _prepare_envelope( + self: Self, envelope: Envelope + ) -> Optional[Tuple[Envelope, io.BytesIO, Dict[str, str]]]: new_items = [] for item in envelope.items: if self._check_disabled(item.data_category): @@ -442,13 +433,7 @@ def _send_envelope(self: Self, envelope: Envelope) -> None: if content_encoding: headers["Content-Encoding"] = content_encoding - self._send_request( - body.getvalue(), - headers=headers, - endpoint_type=EndpointType.ENVELOPE, - envelope=envelope, - ) - return None + return envelope, body, headers def _serialize_envelope( self: Self, envelope: Envelope @@ -506,6 +491,54 @@ def _request( ) -> Union[urllib3.BaseHTTPResponse, httpcore.Response]: raise NotImplementedError() + def kill(self: Self) -> None: + logger.debug("Killing HTTP transport") + 
self._worker.kill() + + +class BaseSyncHttpTransport(HttpTransportCore): + + def _send_envelope(self: Self, envelope: Envelope) -> None: + _prepared_envelope = self._prepare_envelope(envelope) + if _prepared_envelope is None: # TODO: check this behaviour in detail + return None + envelope, body, headers = _prepared_envelope + self._send_request( + body.getvalue(), + headers=headers, + endpoint_type=EndpointType.ENVELOPE, + envelope=envelope, + ) + return None + + def _send_request( + self: Self, + body: bytes, + headers: Dict[str, str], + endpoint_type: EndpointType, + envelope: Optional[Envelope], + ) -> None: + self._update_headers(headers) + try: + response = self._request( + "POST", + endpoint_type, + body, + headers, + ) + except Exception: + self._handle_request_error(envelope=envelope, loss_reason="network") + raise + try: + self._handle_response(response=response, envelope=envelope) + finally: + response.close() + + def _flush_client_reports(self: Self, force: bool = False) -> None: + client_report = self._fetch_pending_client_report(force=force, interval=60) + if client_report is not None: + self.capture_envelope(Envelope(items=[client_report])) + def capture_envelope(self: Self, envelope: Envelope) -> None: def send_envelope_wrapper() -> None: with capture_internal_exceptions(): @@ -528,12 +561,8 @@ def flush( self._worker.submit(lambda: self._flush_client_reports(force=True)) self._worker.flush(timeout, callback) - def kill(self: Self) -> None: - logger.debug("Killing HTTP transport") - self._worker.kill() - -class HttpTransport(BaseHttpTransport): +class HttpTransport(BaseSyncHttpTransport): if TYPE_CHECKING: _pool: Union[PoolManager, ProxyManager] @@ -650,7 +679,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: else: - class Http2Transport(BaseHttpTransport): # type: ignore + class Http2Transport(BaseSyncHttpTransport): # type: ignore """The HTTP2 transport based on httpcore.""" TIMEOUT = 15 From 666ff3a878cb43eb35c983cb68d75c1ce72b6061 
Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 11 Jul 2025 11:30:29 +0200 Subject: [PATCH 002/102] ref(transport) Removed Todo and reverted class name change Removed an unnecessary TODO message and reverted a class name change for BaseHTTPTransport. GH-4568 --- sentry_sdk/client.py | 4 ++-- sentry_sdk/transport.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 9da4a40bf6..67c723c76c 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -25,7 +25,7 @@ ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace -from sentry_sdk.transport import HttpTransportCore, make_transport +from sentry_sdk.transport import BaseHttpTransport, make_transport from sentry_sdk.consts import ( SPANDATA, DEFAULT_MAX_VALUE_LENGTH, @@ -403,7 +403,7 @@ def _capture_envelope(envelope: Envelope) -> None: self.monitor or self.log_batcher or has_profiling_enabled(self.options) - or isinstance(self.transport, HttpTransportCore) + or isinstance(self.transport, BaseHttpTransport) ): # If we have anything on that could spawn a background thread, we # need to check if it's safe to use them. 
diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 3a0d4ec991..89651f355b 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -162,7 +162,7 @@ def _parse_rate_limits( continue -class HttpTransportCore(Transport): +class BaseHttpTransport(Transport): """The base HTTP transport.""" TIMEOUT = 30 # seconds @@ -496,11 +496,11 @@ def kill(self: Self) -> None: self._worker.kill() -class BaseSyncHttpTransport(HttpTransportCore): +class BaseSyncHttpTransport(BaseHttpTransport): def _send_envelope(self: Self, envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) - if _prepared_envelope is None: # TODO: check this behaviour in detail + if _prepared_envelope is None: return None envelope, body, headers = _prepared_envelope self._send_request( From 748764e30c3b9aac075ae595d98cccdfcfd96354 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 11 Jul 2025 12:34:28 +0200 Subject: [PATCH 003/102] test(transport): Add test for HTTP error status handling Adds test coverage for the error handling path when HTTP requests return error status codes. 
GH-4568 --- tests/test_transport.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/tests/test_transport.py b/tests/test_transport.py index 7e0cc6383c..76db2b8e82 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -619,9 +619,9 @@ def test_record_lost_event_transaction_item(capturing_server, make_client, span_ transport.record_lost_event(reason="test", item=transaction_item) client.flush() - (captured,) = capturing_server.captured # Should only be one envelope + (captured,) = capturing_server.captured envelope = captured.envelope - (item,) = envelope.items # Envelope should only have one item + (item,) = envelope.items assert item.type == "client_report" @@ -641,3 +641,38 @@ def test_record_lost_event_transaction_item(capturing_server, make_client, span_ "reason": "test", "quantity": span_count + 1, } in discarded_events + + +def test_handle_unexpected_status_invokes_handle_request_error( + make_client, monkeypatch +): + client = make_client() + transport = client.transport + + monkeypatch.setattr(transport._worker, "submit", lambda fn: fn() or True) + + def stub_request(method, endpoint, body=None, headers=None): + class MockResponse: + def __init__(self): + self.status = 500 # Integer + self.data = b"server error" + self.headers = {} + + def close(self): + pass + + return MockResponse() + + monkeypatch.setattr(transport, "_request", stub_request) + + seen = [] + monkeypatch.setattr( + transport, + "_handle_request_error", + lambda envelope, loss_reason: seen.append(loss_reason), + ) + + client.capture_event({"message": "test"}) + client.flush() + + assert seen == ["status_500"] From ee6dbeecf8271884a19cd3361dfd5dc5f7d98832 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 11 Jul 2025 14:48:30 +0200 Subject: [PATCH 004/102] test(transport): Restore accidentally removed comments Restore comments accidentally removed during a previous commit. 
--- tests/test_transport.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_transport.py b/tests/test_transport.py index 76db2b8e82..bd87728962 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -619,9 +619,9 @@ def test_record_lost_event_transaction_item(capturing_server, make_client, span_ transport.record_lost_event(reason="test", item=transaction_item) client.flush() - (captured,) = capturing_server.captured + (captured,) = capturing_server.captured # Should only be one envelope envelope = captured.envelope - (item,) = envelope.items + (item,) = envelope.items # Envelope should only have one item assert item.type == "client_report" From 19405fdc3d2a943f18157def55674489f4431804 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 14 Jul 2025 09:27:57 +0200 Subject: [PATCH 005/102] ref(transport) Refactor class names to reflect previous functionality Refactored class names such that BaseHttpTransport now has the same functionality as before the hierarchy refactor GH-4568 --- sentry_sdk/client.py | 4 ++-- sentry_sdk/transport.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 67c723c76c..9da4a40bf6 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -25,7 +25,7 @@ ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace -from sentry_sdk.transport import BaseHttpTransport, make_transport +from sentry_sdk.transport import HttpTransportCore, make_transport from sentry_sdk.consts import ( SPANDATA, DEFAULT_MAX_VALUE_LENGTH, @@ -403,7 +403,7 @@ def _capture_envelope(envelope: Envelope) -> None: self.monitor or self.log_batcher or has_profiling_enabled(self.options) - or isinstance(self.transport, BaseHttpTransport) + or isinstance(self.transport, HttpTransportCore) ): # If we have anything on that could spawn a background thread, we # need to check if it's safe to use them. 
diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 89651f355b..e0e9694b90 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -162,7 +162,7 @@ def _parse_rate_limits( continue -class BaseHttpTransport(Transport): +class HttpTransportCore(Transport): """The base HTTP transport.""" TIMEOUT = 30 # seconds @@ -496,7 +496,7 @@ def kill(self: Self) -> None: self._worker.kill() -class BaseSyncHttpTransport(BaseHttpTransport): +class BaseHttpTransport(HttpTransportCore): def _send_envelope(self: Self, envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) @@ -562,7 +562,7 @@ def flush( self._worker.flush(timeout, callback) -class HttpTransport(BaseSyncHttpTransport): +class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: _pool: Union[PoolManager, ProxyManager] @@ -679,7 +679,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: else: - class Http2Transport(BaseSyncHttpTransport): # type: ignore + class Http2Transport(BaseHttpTransport): # type: ignore """The HTTP2 transport based on httpcore.""" TIMEOUT = 15 From 3736c036501ac6ab3854306d718d7f9def4fb80e Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 11:48:11 +0200 Subject: [PATCH 006/102] ref(transport): Add flush_async in the Transport abc Add a new flush_async method in the Transport ABC. This is needed for the async transport, as calling it from the client while preserving execution order in close will require flush to be a coroutine, not a function. GH-4568 --- sentry_sdk/transport.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index e0e9694b90..60c6ae794a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -107,6 +107,19 @@ def flush( """ return None + async def flush_async( + self: Self, + timeout: float, + callback: Optional[Any] = None, + ) -> None: + """ + Send out current events within `timeout` seconds. 
This method needs to be awaited for blocking behavior. + + The default implementation is a no-op, since this method may only be relevant to some transports. + Subclasses should override this method if necessary. + """ + return None + def kill(self: Self) -> None: """ Forcefully kills the transport. From 3607d44eac8c694295e5b35e73f27d8d2ff705ff Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 12:33:26 +0200 Subject: [PATCH 007/102] ref(transport): Move flush_async from ABC Move flush_async down to the specific async transport subclass. This makes more sense anyway, as this will only be required by the async transport. If more async transports are expected, another shared superclass can be created. GH-4568 --- sentry_sdk/transport.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 60c6ae794a..e0e9694b90 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -107,19 +107,6 @@ def flush( """ return None - async def flush_async( - self: Self, - timeout: float, - callback: Optional[Any] = None, - ) -> None: - """ - Send out current events within `timeout` seconds. This method needs to be awaited for blocking behavior. - - The default implementation is a no-op, since this method may only be relevant to some transports. - Subclasses should override this method if necessary. - """ - return None - def kill(self: Self) -> None: """ Forcefully kills the transport. From 0ba5a839adda3b9319dd456c1f7c023f887478c6 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 15:51:58 +0200 Subject: [PATCH 008/102] ref(transport): add async type annotations to HTTPTransportCore Add necessary type annotations to the core HttpTransport to accomodate for async transport. 
GH-4568 --- sentry_sdk/transport.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index e0e9694b90..c9aa3e15ea 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -479,6 +479,9 @@ def _make_pool( httpcore.SOCKSProxy, httpcore.HTTPProxy, httpcore.ConnectionPool, + httpcore.AsyncSOCKSProxy, + httpcore.AsyncHTTPProxy, + httpcore.AsyncConnectionPool, ]: raise NotImplementedError() From 9bb628e36b5c77fee10988dfb687564fbfeaa1cf Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 14 Jul 2025 14:18:36 +0200 Subject: [PATCH 009/102] ref(transport): Add abstract base class for worker implementation Add an abstract base class for implementation of the background worker. This was done to provide a shared interface for the current implementation of a threaded worker in the sync context as well as the upcoming async task-based worker implementation. GH-4578 --- sentry_sdk/worker.py | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index d911e15623..510376f381 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -1,4 +1,5 @@ from __future__ import annotations +from abc import ABC, abstractmethod import os import threading @@ -16,7 +17,48 @@ _TERMINATOR = object() -class BackgroundWorker: +class Worker(ABC): + """ + Base class for all workers. + + A worker is used to process events in the background and send them to Sentry. + """ + + @property + @abstractmethod + def is_alive(self) -> bool: + pass + + @abstractmethod + def kill(self) -> None: + pass + + @abstractmethod + def flush( + self, timeout: float, callback: Optional[Callable[[int, float], None]] = None + ) -> None: + """ + Flush the worker. + + This method blocks until the worker has flushed all events or the specified timeout is reached. 
+ """ + pass + + @abstractmethod + def full(self) -> bool: + pass + + @abstractmethod + def submit(self, callback: Callable[[], None]) -> bool: + """ + Schedule a callback to be executed by the worker. + + Returns True if the callback was scheduled, False if the queue is full. + """ + pass + + +class BackgroundWorker(Worker): def __init__(self, queue_size: int = DEFAULT_QUEUE_SIZE) -> None: self._queue: Queue = Queue(queue_size) self._lock = threading.Lock() From a81487edace3f268179f664a12bc2be079478b1f Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 14 Jul 2025 14:23:23 +0200 Subject: [PATCH 010/102] ref(transport): Add _create_worker factory method to Transport Add a new factory method instead of direct instatiation of the threaded background worker. This allows for easy extension to other types of workers, such as the upcoming task-based async worker. GH-4578 --- sentry_sdk/transport.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index c9aa3e15ea..5799f29d7c 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -28,7 +28,7 @@ from sentry_sdk.consts import EndpointType from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions -from sentry_sdk.worker import BackgroundWorker +from sentry_sdk.worker import BackgroundWorker, Worker from sentry_sdk.envelope import Envelope, Item, PayloadRef from typing import TYPE_CHECKING @@ -173,7 +173,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: Transport.__init__(self, options) assert self.parsed_dsn is not None self.options: Dict[str, Any] = options - self._worker = BackgroundWorker(queue_size=options["transport_queue_size"]) + self._worker = self._create_worker(options) self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION) self._disabled_until: Dict[Optional[str], datetime] = {} # We only use this Retry() class for the `get_retry_after` method it exposes @@ -224,6 +224,10 @@ def 
__init__(self: Self, options: Dict[str, Any]) -> None: elif self._compression_algo == "br": self._compression_level = 4 + def _create_worker(self: Self, options: Dict[str, Any]) -> Worker: + # For now, we only support the threaded sync background worker. + return BackgroundWorker(queue_size=options["transport_queue_size"]) + def record_lost_event( self: Self, reason: str, From 8960e6fde8dd64e867cf398a9ddc13040d880b00 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 11:55:26 +0200 Subject: [PATCH 011/102] ref(worker): Add flush_async method to Worker ABC Add a new flush_async method to worker ABC. This is necessary because the async transport cannot use a synchronous blocking flush. GH-4578 --- sentry_sdk/worker.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 510376f381..f37f920fe3 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -33,7 +33,6 @@ def is_alive(self) -> bool: def kill(self) -> None: pass - @abstractmethod def flush( self, timeout: float, callback: Optional[Callable[[int, float], None]] = None ) -> None: @@ -41,8 +40,22 @@ def flush( Flush the worker. This method blocks until the worker has flushed all events or the specified timeout is reached. + Default implementation is a no-op, since this method may only be relevant to some workers. + Subclasses should override this method if necessary. """ - pass + return None + + async def flush_async( + self, timeout: float, callback: Optional[Callable[[int, float], None]] = None + ) -> None: + """ + Flush the worker. + + This method can be awaited until the worker has flushed all events or the specified timeout is reached. + Default implementation is a no-op, since this method may only be relevant to some workers. + Subclasses should override this method if necessary. 
+ """ + return None @abstractmethod def full(self) -> bool: From 0f7937b7cf64369bf16e7f576c1a5a0c8a444297 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 12:43:04 +0200 Subject: [PATCH 012/102] ref(worker): Move worker flush_async from Worker ABC Move the flush_async down to the concrete subclass to not break existing testing. This makes sense, as this will only really be needed by the async worker anyway and therefore is not shared logic. GH-4578 --- sentry_sdk/worker.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index f37f920fe3..200a9ea914 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -45,18 +45,6 @@ def flush( """ return None - async def flush_async( - self, timeout: float, callback: Optional[Callable[[int, float], None]] = None - ) -> None: - """ - Flush the worker. - - This method can be awaited until the worker has flushed all events or the specified timeout is reached. - Default implementation is a no-op, since this method may only be relevant to some workers. - Subclasses should override this method if necessary. - """ - return None - @abstractmethod def full(self) -> bool: pass From 268ea1a4eceb915684a6e1f328059cb358ff8904 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 14:28:16 +0200 Subject: [PATCH 013/102] ref(worker): Amend function signature for coroutines Coroutines have a return value, however the current function signature for the worker methods does not accomodate for this. Therefore, this signature was changed. 
GH-4578 --- sentry_sdk/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 200a9ea914..7325455f8f 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -34,7 +34,7 @@ def kill(self) -> None: pass def flush( - self, timeout: float, callback: Optional[Callable[[int, float], None]] = None + self, timeout: float, callback: Optional[Callable[[int, float], Any]] = None ) -> None: """ Flush the worker. @@ -50,7 +50,7 @@ def full(self) -> bool: pass @abstractmethod - def submit(self, callback: Callable[[], None]) -> bool: + def submit(self, callback: Callable[[], Any]) -> bool: """ Schedule a callback to be executed by the worker. @@ -149,7 +149,7 @@ def _wait_flush(self, timeout: float, callback: Optional[Any]) -> None: pending = self._queue.qsize() + 1 logger.error("flush timed out, dropped %s events", pending) - def submit(self, callback: Callable[[], None]) -> bool: + def submit(self, callback: Callable[[], Any]) -> bool: self._ensure_thread() try: self._queue.put_nowait(callback) From b3c05cc99dd2660d3d32dbbbe490acffa802f40a Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 11:41:16 +0200 Subject: [PATCH 014/102] feat(transport): Add an async task-based worker for transport Add a new implementation of the worker interface, implementing the worker as an async task. This is to be used by the upcoming async transport. 
GH-4581 --- sentry_sdk/worker.py | 92 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 7325455f8f..c002fe0a5a 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -2,6 +2,7 @@ from abc import ABC, abstractmethod import os import threading +import asyncio from time import sleep, time from sentry_sdk._queue import Queue, FullError @@ -170,3 +171,94 @@ def _target(self) -> None: finally: self._queue.task_done() sleep(0) + + +class AsyncWorker(Worker): + def __init__(self, queue_size: int = DEFAULT_QUEUE_SIZE) -> None: + self._queue: asyncio.Queue = asyncio.Queue(queue_size) + self._task: Optional[asyncio.Task] = None + # Event loop needs to remain in the same process + self._task_for_pid: Optional[int] = None + self._loop: Optional[asyncio.AbstractEventLoop] = None + + @property + def is_alive(self) -> bool: + if self._task_for_pid != os.getpid(): + return False + if not self._task or not self._loop: + return False + return self._loop.is_running() and not self._task.done() + + def kill(self) -> None: + if self._task: + self._task.cancel() + self._task = None + self._task_for_pid = None + + def start(self) -> None: + if not self.is_alive: + try: + self._loop = asyncio.get_running_loop() + self._task = self._loop.create_task(self._target()) + self._task_for_pid = os.getpid() + except RuntimeError: + # There is no event loop running + self._loop = None + self._task = None + self._task_for_pid = None + + def full(self) -> bool: + return self._queue.full() + + def _ensure_task(self) -> None: + if not self.is_alive: + self.start() + + async def _wait_flush(self, timeout: float, callback: Optional[Any] = None) -> None: + if not self._loop or not self._loop.is_running(): + return + + initial_timeout = min(0.1, timeout) + + # Timeout on the join + try: + await asyncio.wait_for(self._queue.join(), timeout=initial_timeout) + except asyncio.TimeoutError: + pending = 
self._queue.qsize() + 1 + logger.debug("%d event(s) pending on flush", pending) + if callback is not None: + callback(pending, timeout) + + try: + remaining_timeout = timeout - initial_timeout + await asyncio.wait_for(self._queue.join(), timeout=remaining_timeout) + except asyncio.TimeoutError: + pending = self._queue.qsize() + 1 + logger.error("flush timed out, dropped %s events", pending) + + async def flush(self, timeout: float, callback: Optional[Any] = None) -> None: + logger.debug("background worker got flush request") + if self.is_alive and timeout > 0.0: + await self._wait_flush(timeout, callback) + logger.debug("background worker flushed") + + def submit(self, callback: Callable[[], None]) -> bool: + self._ensure_task() + + try: + self._queue.put_nowait(callback) + return True + except asyncio.QueueFull: + return False + + async def _target(self) -> None: + while True: + callback = await self._queue.get() + try: + callback() + except Exception: + logger.error("Failed processing job", exc_info=True) + finally: + self._queue.task_done() + # Yield to let the event loop run other tasks + await asyncio.sleep(0) From fb0ad188116fa9bdc9dbd2ac735210dd6da80dd8 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 11:58:20 +0200 Subject: [PATCH 015/102] ref(worker): Make worker work with new ABC interface Refactor the flush method in the async worker to use the async_flush coroutine. 
GH-4581 --- sentry_sdk/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index c002fe0a5a..1825faa776 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -236,7 +236,7 @@ async def _wait_flush(self, timeout: float, callback: Optional[Any] = None) -> N pending = self._queue.qsize() + 1 logger.error("flush timed out, dropped %s events", pending) - async def flush(self, timeout: float, callback: Optional[Any] = None) -> None: + async def flush_async(self, timeout: float, callback: Optional[Any] = None) -> None: logger.debug("background worker got flush request") if self.is_alive and timeout > 0.0: await self._wait_flush(timeout, callback) logger.debug("background worker flushed") From 7edbbaf243bc4d46f862e46326fdf78d72d1f0e6 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 14:00:33 +0200 Subject: [PATCH 016/102] fix(worker): Check if callbacks from worker queue are coroutines or functions Add a check to see whether callbacks are awaitable coroutines or functions, as coroutines need to be awaited. 
GH-4581 --- sentry_sdk/worker.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 1825faa776..aa7926364f 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -3,6 +3,7 @@ import os import threading import asyncio +import inspect from time import sleep, time from sentry_sdk._queue import Queue, FullError @@ -255,7 +256,12 @@ async def _target(self) -> None: while True: callback = await self._queue.get() try: - callback() + if inspect.iscoroutinefunction(callback): + # Callback is an async coroutine, need to await it + await callback() + else: + # Callback is a sync function, need to call it + callback() except Exception: logger.error("Failed processing job", exc_info=True) finally: From 0f63d24d948dbb5a5a3178dadd9bf9f366d83ec1 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 14:21:58 +0200 Subject: [PATCH 017/102] ref(worker): Amend return type of submit and flush to accommodate for coroutines Coroutines do not return None, therefore it is necessary to consider this in the callback parameter of the worker. Previously, only callbacks with return type None were accepted. 
GH-4581 --- sentry_sdk/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index aa7926364f..5423877e69 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -243,7 +243,7 @@ async def flush_async(self, timeout: float, callback: Optional[Any] = None) -> N await self._wait_flush(timeout, callback) logger.debug("background worker flushed") - def submit(self, callback: Callable[[], None]) -> bool: + def submit(self, callback: Callable[[], Any]) -> bool: self._ensure_task() try: From 2430e2e104f9d2a46f06b9fc8fe966c57cd22c6d Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 15:53:20 +0200 Subject: [PATCH 018/102] ref(worker): Add type parameters for AsyncWorker variables GH-4581 --- sentry_sdk/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 5423877e69..97a16a1605 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -176,8 +176,8 @@ def _target(self) -> None: class AsyncWorker(Worker): def __init__(self, queue_size: int = DEFAULT_QUEUE_SIZE) -> None: - self._queue: asyncio.Queue = asyncio.Queue(queue_size) - self._task: Optional[asyncio.Task] = None + self._queue: asyncio.Queue[Callable[[], Any]] = asyncio.Queue(queue_size) + self._task: Optional[asyncio.Task[None]] = None # Event loop needs to remain in the same process self._task_for_pid: Optional[int] = None self._loop: Optional[asyncio.AbstractEventLoop] = None From 96fcd85bceab379e675fed757f21e6de22a43fa0 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 17 Jul 2025 16:05:04 +0200 Subject: [PATCH 019/102] ref(worker): Remove loop upon killing worker GH-4581 --- sentry_sdk/worker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 97a16a1605..dfa9ac5b8f 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -195,6 +195,7 @@ def kill(self) -> None: self._task.cancel() self._task = 
None self._task_for_pid = None + self._loop = None def start(self) -> None: if not self.is_alive: From 331e40b434abe5d4588d40827d2cbdb16f39db06 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 18 Jul 2025 11:59:30 +0200 Subject: [PATCH 020/102] feat(worker): Enable concurrent callbacks on async task worker Enable concurrent callbacks on async task worker by firing them as a task rather than awaiting them. A done callback handles the necessary queue and exception logic. GH-4581 --- sentry_sdk/worker.py | 44 +++++++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index dfa9ac5b8f..9df639e56a 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -181,6 +181,8 @@ def __init__(self, queue_size: int = DEFAULT_QUEUE_SIZE) -> None: # Event loop needs to remain in the same process self._task_for_pid: Optional[int] = None self._loop: Optional[asyncio.AbstractEventLoop] = None + # Track active callback tasks so they have a strong reference and can be cancelled on kill + self._active_tasks: set[asyncio.Task] = set() @property def is_alive(self) -> bool: @@ -195,6 +197,12 @@ def kill(self) -> None: self._task.cancel() self._task = None self._task_for_pid = None + # Also cancel any active callback tasks + # Avoid modifying the set while cancelling tasks + tasks_to_cancel = set(self._active_tasks) + for task in tasks_to_cancel: + task.cancel() + self._active_tasks.clear() self._loop = None def start(self) -> None: @@ -256,16 +264,30 @@ def submit(self, callback: Callable[[], Any]) -> bool: async def _target(self) -> None: while True: callback = await self._queue.get() - try: - if inspect.iscoroutinefunction(callback): - # Callback is an async coroutine, need to await it - await callback() - else: - # Callback is a sync function, need to call it - callback() - except Exception: - logger.error("Failed processing job", exc_info=True) - finally: - self._queue.task_done() + # 
Firing tasks instead of awaiting them allows for concurrent requests + task = asyncio.create_task(self._process_callback(callback)) + # Create a strong reference to the task so it can be cancelled on kill + # and does not get garbage collected while running + self._active_tasks.add(task) + task.add_done_callback(self._on_task_complete) # Yield to let the event loop run other tasks await asyncio.sleep(0) + + async def _process_callback(self, callback: Callable[[], Any]) -> None: + if inspect.iscoroutinefunction(callback): + # Callback is an async coroutine, need to await it + await callback() + else: + # Callback is a sync function, need to call it + callback() + + def _on_task_complete(self, task: asyncio.Task[None]) -> None: + try: + task.result() + except Exception: + logger.error("Failed processing job", exc_info=True) + finally: + # Mark the task as done and remove it from the active tasks set + # This happens only after the task has completed + self._queue.task_done() + self._active_tasks.discard(task) From 5f674855b0edfe9550cff467e74c61884a2af9a8 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 18 Jul 2025 12:21:22 +0200 Subject: [PATCH 021/102] fix(worker): Modify kill behaviour to mirror threaded worker Changed kill to also use the _TERMINATOR sentinel, so the queue is still drained to this point on kill instead of cancelled immediately. This should also fix potential race conditions with flush_async. 
GH-4581 --- sentry_sdk/worker.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 9df639e56a..04db6c9279 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -176,7 +176,7 @@ def _target(self) -> None: class AsyncWorker(Worker): def __init__(self, queue_size: int = DEFAULT_QUEUE_SIZE) -> None: - self._queue: asyncio.Queue[Callable[[], Any]] = asyncio.Queue(queue_size) + self._queue: asyncio.Queue[Any] = asyncio.Queue(queue_size) self._task: Optional[asyncio.Task[None]] = None # Event loop needs to remain in the same process self._task_for_pid: Optional[int] = None @@ -194,9 +194,10 @@ def is_alive(self) -> bool: def kill(self) -> None: if self._task: - self._task.cancel() - self._task = None - self._task_for_pid = None + try: + self._queue.put_nowait(_TERMINATOR) + except asyncio.QueueFull: + logger.debug("async worker queue full, kill failed") # Also cancel any active callback tasks # Avoid modifying the set while cancelling tasks tasks_to_cancel = set(self._active_tasks) @@ -204,6 +205,8 @@ def kill(self) -> None: task.cancel() self._active_tasks.clear() self._loop = None + self._task = None + self._task_for_pid = None def start(self) -> None: if not self.is_alive: @@ -264,6 +267,9 @@ def submit(self, callback: Callable[[], Any]) -> bool: async def _target(self) -> None: while True: callback = await self._queue.get() + if callback is _TERMINATOR: + self._queue.task_done() + break # Firing tasks instead of awaiting them allows for concurrent requests task = asyncio.create_task(self._process_callback(callback)) # Create a strong reference to the task so it can be cancelled on kill From 97c5e3d30eaf126043be201f4d74fd2861846956 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 21 Jul 2025 10:17:32 +0200 Subject: [PATCH 022/102] ref(worker): add proper type annotation to worker task list Add proper type annotation to worker task list to fix linting problems GH-4581 --- 
sentry_sdk/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py index 04db6c9279..174341f8cd 100644 --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -182,7 +182,7 @@ def __init__(self, queue_size: int = DEFAULT_QUEUE_SIZE) -> None: self._task_for_pid: Optional[int] = None self._loop: Optional[asyncio.AbstractEventLoop] = None # Track active callback tasks so they have a strong reference and can be cancelled on kill - self._active_tasks: set[asyncio.Task] = set() + self._active_tasks: set[asyncio.Task[None]] = set() @property def is_alive(self) -> bool: From 8809b080f052f1062b6068e349d641196b6dc724 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 21 Jul 2025 14:06:50 +0200 Subject: [PATCH 023/102] feat(transport): Add async transport class Add an implementation of Transport to work with the async background worker and HTTPCore async. GH-4582 --- sentry_sdk/transport.py | 182 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 5799f29d7c..3607820e2a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -6,6 +6,7 @@ import socket import ssl import time +import asyncio from datetime import datetime, timedelta, timezone from collections import defaultdict from urllib.request import getproxies @@ -569,6 +570,187 @@ def flush( self._worker.flush(timeout, callback) +class AsyncHttpTransport(HttpTransportCore): + def __init__(self: Self, options: Dict[str, Any]) -> None: + super().__init__(options) + # Requires event loop at init time + self._loop = asyncio.get_running_loop() + self.background_tasks = set() + + async def _send_envelope(self: Self, envelope: Envelope) -> None: + _prepared_envelope = self._prepare_envelope(envelope) + if _prepared_envelope is None: + return None + envelope, body, headers = _prepared_envelope + await self._send_request( + body.getvalue(), + headers=headers, + 
endpoint_type=EndpointType.ENVELOPE, + envelope=envelope, + ) + return None + + async def _send_request( + self: Self, + body: bytes, + headers: Dict[str, str], + endpoint_type: EndpointType, + envelope: Optional[Envelope], + ) -> None: + self._update_headers(headers) + try: + response = await self._request( + "POST", + endpoint_type, + body, + headers, + ) + except Exception: + self._handle_request_error(envelope=envelope, loss_reason="network") + raise + try: + self._handle_response(response=response, envelope=envelope) + finally: + response.close() + + async def _request( + self: Self, + method: str, + endpoint_type: EndpointType, + body: Any, + headers: Mapping[str, str], + ) -> httpcore.Response: + return await self._pool.request( + method, + self._auth.get_api_url(endpoint_type), + content=body, + headers=headers, # type: ignore + ) + + def _flush_client_reports(self: Self, force: bool = False) -> None: + client_report = self._fetch_pending_client_report(force=force, interval=60) + if client_report is not None: + self.capture_envelope(Envelope(items=[client_report])) + + async def _capture_envelope(self: Self, envelope: Envelope) -> None: + async def send_envelope_wrapper() -> None: + with capture_internal_exceptions(): + await self._send_envelope(envelope) + self._flush_client_reports() + + if not self._worker.submit(send_envelope_wrapper): + self.on_dropped_event("full_queue") + for item in envelope.items: + self.record_lost_event("queue_overflow", item=item) + + def capture_envelope(self: Self, envelope: Envelope) -> None: + # Synchronous entry point + if asyncio.get_running_loop() is not None: + # We are on the main thread running the event loop + task = asyncio.create_task(self._capture_envelope(envelope)) + self.background_tasks.add(task) + task.add_done_callback(self.background_tasks.discard) + else: + # We are in a background thread, not running an event loop, + # have to launch the task on the loop in a threadsafe way. 
+ asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self._loop, + ) + + async def flush_async( + self: Self, + timeout: float, + callback: Optional[Callable[[int, float], None]] = None, + ) -> None: + logger.debug("Flushing HTTP transport") + + if timeout > 0: + self._worker.submit(lambda: self._flush_client_reports(force=True)) + await self._worker.flush_async(timeout, callback) # type: ignore + + def _get_pool_options(self: Self) -> Dict[str, Any]: + options: Dict[str, Any] = { + "http2": False, # no HTTP2 for now + "retries": 3, + } + + socket_options = ( + self.options["socket_options"] + if self.options["socket_options"] is not None + else [] + ) + + used_options = {(o[0], o[1]) for o in socket_options} + for default_option in KEEP_ALIVE_SOCKET_OPTIONS: + if (default_option[0], default_option[1]) not in used_options: + socket_options.append(default_option) + + options["socket_options"] = socket_options + + ssl_context = ssl.create_default_context() + ssl_context.load_verify_locations( + self.options["ca_certs"] # User-provided bundle from the SDK init + or os.environ.get("SSL_CERT_FILE") + or os.environ.get("REQUESTS_CA_BUNDLE") + or certifi.where() + ) + cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE") + key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE") + if cert_file is not None: + ssl_context.load_cert_chain(cert_file, key_file) + + options["ssl_context"] = ssl_context + + return options + + def _make_pool( + self: Self, + ) -> Union[ + httpcore.AsyncSOCKSProxy, httpcore.AsyncHTTPProxy, httpcore.AsyncConnectionPool + ]: + if self.parsed_dsn is None: + raise ValueError("Cannot create HTTP-based transport without valid DSN") + proxy = None + no_proxy = self._in_no_proxy(self.parsed_dsn) + + # try HTTPS first + https_proxy = self.options["https_proxy"] + if self.parsed_dsn.scheme == "https" and (https_proxy != ""): + proxy = https_proxy or (not no_proxy and getproxies().get("https")) + + # 
maybe fallback to HTTP proxy + http_proxy = self.options["http_proxy"] + if not proxy and (http_proxy != ""): + proxy = http_proxy or (not no_proxy and getproxies().get("http")) + + opts = self._get_pool_options() + + if proxy: + proxy_headers = self.options["proxy_headers"] + if proxy_headers: + opts["proxy_headers"] = proxy_headers + + if proxy.startswith("socks"): + try: + if "socket_options" in opts: + socket_options = opts.pop("socket_options") + if socket_options: + logger.warning( + "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options." + ) + return httpcore.AsyncSOCKSProxy(proxy_url=proxy, **opts) + except RuntimeError: + logger.warning( + "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support.", + proxy, + ) + else: + return httpcore.AsyncHTTPProxy(proxy_url=proxy, **opts) + + return httpcore.AsyncConnectionPool(**opts) + + class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: _pool: Union[PoolManager, ProxyManager] From 7c5dec035ad27fe4a468666fd2fedb060dc7c89d Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 10:31:45 +0200 Subject: [PATCH 024/102] ref(transport): Fix event loop handling in async transport Async Transport now properly checks for the presence of the event loop in capture_envelope, and drops items in case the event loop is no longer running for some reason. 
GH-4582 --- sentry_sdk/transport.py | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 3607820e2a..77ae060d2a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -29,7 +29,7 @@ from sentry_sdk.consts import EndpointType from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions -from sentry_sdk.worker import BackgroundWorker, Worker +from sentry_sdk.worker import BackgroundWorker, Worker, AsyncWorker from sentry_sdk.envelope import Envelope, Item, PayloadRef from typing import TYPE_CHECKING @@ -225,9 +225,10 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: elif self._compression_algo == "br": self._compression_level = 4 - def _create_worker(self: Self, options: Dict[str, Any]) -> Worker: - # For now, we only support the threaded sync background worker. - return BackgroundWorker(queue_size=options["transport_queue_size"]) + def _create_worker(self, options: dict[str, Any]) -> Worker: + async_enabled = options.get("_experiments", {}).get("transport_async", False) + worker_cls = AsyncWorker if async_enabled else BackgroundWorker + return worker_cls(queue_size=options["transport_queue_size"]) def record_lost_event( self: Self, @@ -645,18 +646,26 @@ async def send_envelope_wrapper() -> None: def capture_envelope(self: Self, envelope: Envelope) -> None: # Synchronous entry point - if asyncio.get_running_loop() is not None: + try: + asyncio.get_running_loop() # We are on the main thread running the event loop task = asyncio.create_task(self._capture_envelope(envelope)) self.background_tasks.add(task) task.add_done_callback(self.background_tasks.discard) - else: + except RuntimeError: # We are in a background thread, not running an event loop, # have to launch the task on the loop in a threadsafe way. 
- asyncio.run_coroutine_threadsafe( - self._capture_envelope(envelope), - self._loop, - ) + if self._loop and self._loop.is_running(): + asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self._loop, + ) + else: + # The event loop is no longer running + logger.warning("Async Transport is not running in an event loop.") + self.on_dropped_event("no_async_context") + for item in envelope.items: + self.record_lost_event("no_async_context", item=item) async def flush_async( self: Self, @@ -996,11 +1005,13 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: ref_transport = options["transport"] use_http2_transport = options.get("_experiments", {}).get("transport_http2", False) - + use_async_transport = options.get("_experiments", {}).get("transport_async", False) # By default, we use the http transport class - transport_cls: Type[Transport] = ( - Http2Transport if use_http2_transport else HttpTransport - ) + if use_async_transport and asyncio.get_running_loop() is not None: + transport_cls: Type[Transport] = AsyncHttpTransport + else: + use_http2 = use_http2_transport + transport_cls = Http2Transport if use_http2 else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From b0390e6ad284748dedf8f34ee99cd6dce75a1e18 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 11:10:38 +0200 Subject: [PATCH 025/102] feat(transport): Add kill method for async transport Implement a kill method that properly shuts down the async transport. The httpcore async connection pool needs to be explicitly shutdown at the end of its usage. 
GH-4582 --- sentry_sdk/transport.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 77ae060d2a..b40173799a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -759,6 +759,16 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) + def kill(self: Self) -> None: + + logger.debug("Killing HTTP transport") + self._worker.kill() + for task in self.background_tasks: + task.cancel() + self.background_tasks.clear() + + self._loop.create_task(self._pool.aclose()) # type: ignore + class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: From f01b00d18299d0fa9c8092305ca07d432ec0bcee Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 15:50:06 +0200 Subject: [PATCH 026/102] ref(transport): Fix type errors in async transport Fix type errors resulting from async override and missing type definition in the async transport. GH-4582 --- sentry_sdk/transport.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index b40173799a..0cee60dd6e 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -576,7 +576,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time self._loop = asyncio.get_running_loop() - self.background_tasks = set() + self.background_tasks: set[asyncio.Task[None]] = set() async def _send_envelope(self: Self, envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) @@ -614,7 +614,7 @@ async def _send_request( finally: response.close() - async def _request( + async def _request( # type: ignore[override] self: Self, method: str, endpoint_type: EndpointType, From 23b8ea218e7d8862d8d1159e8f06df7a5bab006e Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 09:44:58 +0200 Subject: [PATCH 027/102] Add silent failing to kill on event loop errors Add a try/catch to ensure silent fail on 
kill in case the event loop shuts down. GH-4582 --- sentry_sdk/transport.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 0cee60dd6e..7a757f7cb3 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -766,8 +766,10 @@ def kill(self: Self) -> None: for task in self.background_tasks: task.cancel() self.background_tasks.clear() - - self._loop.create_task(self._pool.aclose()) # type: ignore + try: + self._loop.create_task(self._pool.aclose()) # type: ignore + except RuntimeError: + logger.warning("Event loop not running, aborting kill.") class HttpTransport(BaseHttpTransport): From 176a1d11a8e9f82d3fb70c538c326ddd50008929 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:27:45 +0200 Subject: [PATCH 028/102] ref(transport): Fix event loop check in make_transport Fix the event loop check in make_transport so that it does not throw a runtime error but rather falls back correctly. GH-4582 --- sentry_sdk/transport.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 7a757f7cb3..66f2af9b0f 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -1019,11 +1019,14 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: use_http2_transport = options.get("_experiments", {}).get("transport_http2", False) use_async_transport = options.get("_experiments", {}).get("transport_async", False) # By default, we use the http transport class - if use_async_transport and asyncio.get_running_loop() is not None: - transport_cls: Type[Transport] = AsyncHttpTransport - else: - use_http2 = use_http2_transport - transport_cls = Http2Transport if use_http2 else HttpTransport + if use_async_transport: + try: + asyncio.get_running_loop() + transport_cls: Type[Transport] = AsyncHttpTransport + except RuntimeError: + # No event loop running, fall back to sync transport + 
logger.warning("No event loop running, falling back to sync transport.") + transport_cls = Http2Transport if use_http2_transport else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From 4fe61bf7ee7fb7e1eb674b508d66fadaf96a8379 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:45:13 +0200 Subject: [PATCH 029/102] ref(transport): Add missing transport instantiation in non-async context GH-4582 --- sentry_sdk/transport.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 66f2af9b0f..1150a7531f 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -1027,6 +1027,8 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: # No event loop running, fall back to sync transport logger.warning("No event loop running, falling back to sync transport.") transport_cls = Http2Transport if use_http2_transport else HttpTransport + else: + transport_cls = Http2Transport if use_http2_transport else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From 52c9e36627745a4902dc5a3618705ada2ae8e674 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 11:21:37 +0200 Subject: [PATCH 030/102] ref(transport): Fix httpcore async specific request handling GH-4582 --- sentry_sdk/transport.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 1150a7531f..0d63c11242 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -578,6 +578,16 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: self._loop = asyncio.get_running_loop() self.background_tasks: set[asyncio.Task[None]] = set() + def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: + return next( + ( + val.decode("ascii") + for key, val in response.headers + if key.decode("ascii").lower() == header + ), + None, + ) + async def _send_envelope(self: Self, 
envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) if _prepared_envelope is None: @@ -612,7 +622,7 @@ async def _send_request( try: self._handle_response(response=response, envelope=envelope) finally: - response.close() + await response.aclose() async def _request( # type: ignore[override] self: Self, From 6d69406c3437e27d6967c6bb97e81040628d8998 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:49:26 +0200 Subject: [PATCH 031/102] ref(transport): Add gc safety to async kill GH-4582 --- sentry_sdk/transport.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 0d63c11242..851518fd32 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -777,7 +777,9 @@ def kill(self: Self) -> None: task.cancel() self.background_tasks.clear() try: - self._loop.create_task(self._pool.aclose()) # type: ignore + task = self._loop.create_task(self._pool.aclose()) # type: ignore + self.background_tasks.add(task) + task.add_done_callback(lambda t: self.background_tasks.discard(t)) except RuntimeError: logger.warning("Event loop not running, aborting kill.") From 3629609a9277eabe7e0d287b19ed462c39e94714 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 13:06:14 +0200 Subject: [PATCH 032/102] ref(transport): Add missing httpcore extensions GH-4582 --- sentry_sdk/transport.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 851518fd32..fa009daaac 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -636,6 +636,14 @@ async def _request( # type: ignore[override] self._auth.get_api_url(endpoint_type), content=body, headers=headers, # type: ignore + extensions={ + "timeout": { + "pool": self.TIMEOUT, + "connect": self.TIMEOUT, + "write": self.TIMEOUT, + "read": self.TIMEOUT, + } + }, ) def _flush_client_reports(self: Self, force: bool = False) -> None: From 
21cde522bcf6941ce4ed1aa52b9f834d73fa06f1 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 10:17:17 +0200 Subject: [PATCH 033/102] fix(transport): Fix fallback sync transport creating async worker GH-4582 --- sentry_sdk/transport.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index fa009daaac..0a53e0bb3d 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -227,7 +227,11 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: def _create_worker(self, options: dict[str, Any]) -> Worker: async_enabled = options.get("_experiments", {}).get("transport_async", False) - worker_cls = AsyncWorker if async_enabled else BackgroundWorker + try: + asyncio.get_running_loop() + worker_cls = AsyncWorker if async_enabled else BackgroundWorker + except RuntimeError: + worker_cls = BackgroundWorker return worker_cls(queue_size=options["transport_queue_size"]) def record_lost_event( From c541bd7f61e7e72b120b3e421e31c15b0c9d0b1d Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 13:12:12 +0200 Subject: [PATCH 034/102] ref(transport): Make kill optionally return a task for async Make kill optionally return a task for async transport. This allows for a blocking kill operation if the caller is in an async context. 
GH-4582 --- sentry_sdk/transport.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 0a53e0bb3d..b9db5d17e1 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -781,7 +781,7 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) - def kill(self: Self) -> None: + def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore logger.debug("Killing HTTP transport") self._worker.kill() @@ -789,11 +789,11 @@ def kill(self: Self) -> None: task.cancel() self.background_tasks.clear() try: - task = self._loop.create_task(self._pool.aclose()) # type: ignore - self.background_tasks.add(task) - task.add_done_callback(lambda t: self.background_tasks.discard(t)) + # Return the pool cleanup task so caller can await it if needed + return self._loop.create_task(self._pool.aclose()) # type: ignore except RuntimeError: logger.warning("Event loop not running, aborting kill.") + return None class HttpTransport(BaseHttpTransport): From 2808062d5d2ba33efe8efea9842ed9af96c647a9 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 15:48:58 +0200 Subject: [PATCH 035/102] Integrate AsyncHttpTransport as a new experimental option Add a new experimental option that uses AsyncHttpTransport and integrates it with synchronous client interfaces. 
GH-4601 --- sentry_sdk/client.py | 63 ++++++++++++++++++++++++++++++----------- sentry_sdk/consts.py | 1 + sentry_sdk/transport.py | 8 +++--- 3 files changed, 52 insertions(+), 20 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 9da4a40bf6..f871b4cbd8 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -3,6 +3,7 @@ import uuid import random import socket +import asyncio from collections.abc import Mapping from datetime import datetime, timezone from importlib import import_module @@ -25,7 +26,7 @@ ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace -from sentry_sdk.transport import HttpTransportCore, make_transport +from sentry_sdk.transport import HttpTransportCore, make_transport, AsyncHttpTransport from sentry_sdk.consts import ( SPANDATA, DEFAULT_MAX_VALUE_LENGTH, @@ -914,36 +915,50 @@ def get_integration( return self.integrations.get(integration_name) + def _close_components(self) -> None: + """Kill all client components in the correct order.""" + self.session_flusher.kill() + if self.log_batcher is not None: + self.log_batcher.kill() + if self.monitor: + self.monitor.kill() + if self.transport is not None: + self.transport.kill() + self.transport = None + def close( self, timeout: Optional[float] = None, callback: Optional[Callable[[int, float], None]] = None, - ) -> None: + ) -> Optional[asyncio.Task[None]]: """ Close the client and shut down the transport. Arguments have the same - semantics as :py:meth:`Client.flush`. + semantics as :py:meth:`Client.flush`. When using the async transport, close needs to be awaited to block. 
""" if self.transport is not None: - self.flush(timeout=timeout, callback=callback) + if isinstance(self.transport, AsyncHttpTransport): - self.session_flusher.kill() + def _on_flush_done(_: asyncio.Task[None]) -> None: + self._close_components() - if self.log_batcher is not None: - self.log_batcher.kill() - - if self.monitor: - self.monitor.kill() - - self.transport.kill() - self.transport = None + flush_task = self.transport.loop.create_task( + self._flush_async(timeout, callback) + ) + # Enforce flush before shutdown + flush_task.add_done_callback(_on_flush_done) + return flush_task + else: + self.flush(timeout=timeout, callback=callback) + self._close_components() + return None def flush( self, timeout: Optional[float] = None, callback: Optional[Callable[[int, float], None]] = None, - ) -> None: + ) -> Optional[asyncio.Task[None]]: """ - Wait for the current events to be sent. + Wait for the current events to be sent. When using the async transport, flush needs to be awaited to block. :param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used. 
@@ -952,12 +967,28 @@ def flush( if self.transport is not None: if timeout is None: timeout = self.options["shutdown_timeout"] + self.session_flusher.flush() if self.log_batcher is not None: self.log_batcher.flush() - self.transport.flush(timeout=timeout, callback=callback) + if isinstance(self.transport, AsyncHttpTransport): + return self.transport.loop.create_task( + self._flush_async(timeout, callback) + ) + else: + self.transport.flush(timeout=timeout, callback=callback) + + return None + + async def _flush_async( + self, timeout: float, callback: Optional[Callable[[int, float], None]] + ) -> None: + self.session_flusher.flush() + if self.log_batcher is not None: + self.log_batcher.flush() + await self.transport.flush_async(timeout=timeout, callback=callback) # type: ignore def __enter__(self) -> _Client: return self diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 0ab65919a6..3860987b15 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -75,6 +75,7 @@ class CompressionAlgo(Enum): "transport_compression_algo": Optional[CompressionAlgo], "transport_num_pools": Optional[int], "transport_http2": Optional[bool], + "transport_async": Optional[bool], "enable_logs": Optional[bool], "before_send_log": Optional[Callable[[Log, Hint], Optional[Log]]], }, diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index b9db5d17e1..afe9c6c16a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -669,18 +669,18 @@ async def send_envelope_wrapper() -> None: def capture_envelope(self: Self, envelope: Envelope) -> None: # Synchronous entry point try: - asyncio.get_running_loop() # We are on the main thread running the event loop + asyncio.get_running_loop() task = asyncio.create_task(self._capture_envelope(envelope)) self.background_tasks.add(task) task.add_done_callback(self.background_tasks.discard) except RuntimeError: # We are in a background thread, not running an event loop, # have to launch the task on the loop in a 
threadsafe way. - if self._loop and self._loop.is_running(): + if self.loop and self.loop.is_running(): asyncio.run_coroutine_threadsafe( self._capture_envelope(envelope), - self._loop, + self.loop, ) else: # The event loop is no longer running @@ -702,7 +702,7 @@ async def flush_async( def _get_pool_options(self: Self) -> Dict[str, Any]: options: Dict[str, Any] = { - "http2": False, # no HTTP2 for now + "http2": False, # no HTTP2 for now, should probably just work with this setting "retries": 3, } From ea5f5572ebf314024c0f16945aa1233d059d2b81 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 16:39:32 +0200 Subject: [PATCH 036/102] ref(transport): Fix type issues in AsyncTransport Fix type issues in the Async Transport. GH-4601 --- sentry_sdk/transport.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index afe9c6c16a..8dd1bf6e38 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -669,8 +669,8 @@ async def send_envelope_wrapper() -> None: def capture_envelope(self: Self, envelope: Envelope) -> None: # Synchronous entry point try: - # We are on the main thread running the event loop asyncio.get_running_loop() + # We are on the main thread running the event loop task = asyncio.create_task(self._capture_envelope(envelope)) self.background_tasks.add(task) task.add_done_callback(self.background_tasks.discard) @@ -702,7 +702,7 @@ async def flush_async( def _get_pool_options(self: Self) -> Dict[str, Any]: options: Dict[str, Any] = { - "http2": False, # no HTTP2 for now, should probably just work with this setting + "http2": False, # no HTTP2 for now "retries": 3, } From c61eb02f91c9125a4da153c0461d0e6ea386d6b1 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 16:18:52 +0200 Subject: [PATCH 037/102] ref(transport): Add missing async transport loop type annotation GH-4601 --- sentry_sdk/transport.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 8dd1bf6e38..16e6e49401 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -579,7 +579,7 @@ class AsyncHttpTransport(HttpTransportCore): def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time - self._loop = asyncio.get_running_loop() + self.loop: asyncio.AbstractEventLoop = asyncio.get_running_loop() self.background_tasks: set[asyncio.Task[None]] = set() def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: From 38baead56b9460c23dcd5cc056b1e9ae1b728ec5 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 16:44:03 +0200 Subject: [PATCH 038/102] fix(client): Fix mypy type errors GH-4601 --- sentry_sdk/client.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index f871b4cbd8..44463fd3cc 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -950,7 +950,7 @@ def _on_flush_done(_: asyncio.Task[None]) -> None: else: self.flush(timeout=timeout, callback=callback) self._close_components() - return None + return None def flush( self, @@ -967,6 +967,7 @@ def flush( if self.transport is not None: if timeout is None: timeout = self.options["shutdown_timeout"] + assert timeout is not None # shutdown_timeout should never be None self.session_flusher.flush() @@ -979,12 +980,12 @@ def flush( ) else: self.transport.flush(timeout=timeout, callback=callback) - - return None + return None async def _flush_async( - self, timeout: float, callback: Optional[Callable[[int, float], None]] + self, timeout: Optional[float], callback: Optional[Callable[[int, float], None]] ) -> None: + self.session_flusher.flush() if self.log_batcher is not None: self.log_batcher.flush() From e4ed7732f4801ee6c760e899db2f27461bc4397c Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 16:47:17 +0200 Subject: [PATCH 039/102] ref(client): Fix mypy 
inheritance type error GH-4601 --- sentry_sdk/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 44463fd3cc..0c87695ae8 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -930,7 +930,7 @@ def close( self, timeout: Optional[float] = None, callback: Optional[Callable[[int, float], None]] = None, - ) -> Optional[asyncio.Task[None]]: + ) -> Optional[asyncio.Task[None]]: # type: ignore[override] """ Close the client and shut down the transport. Arguments have the same semantics as :py:meth:`Client.flush`. When using the async transport, close needs to be awaited to block. @@ -956,7 +956,7 @@ def flush( self, timeout: Optional[float] = None, callback: Optional[Callable[[int, float], None]] = None, - ) -> Optional[asyncio.Task[None]]: + ) -> Optional[asyncio.Task[None]]: # type: ignore[override] """ Wait for the current events to be sent. When using the async transport, flush needs to be awaited to block. From 236ae2c1ecc86189c0d54d4a07641d78fd1b8d37 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 16:50:42 +0200 Subject: [PATCH 040/102] ref(client): Move mypy annotations to correct place GH-4601 --- sentry_sdk/client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 0c87695ae8..ab791b069c 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -926,11 +926,11 @@ def _close_components(self) -> None: self.transport.kill() self.transport = None - def close( + def close( # type: ignore[override] self, timeout: Optional[float] = None, callback: Optional[Callable[[int, float], None]] = None, - ) -> Optional[asyncio.Task[None]]: # type: ignore[override] + ) -> Optional[asyncio.Task[None]]: """ Close the client and shut down the transport. Arguments have the same semantics as :py:meth:`Client.flush`. When using the async transport, close needs to be awaited to block. 
@@ -952,11 +952,11 @@ def _on_flush_done(_: asyncio.Task[None]) -> None: self._close_components() return None - def flush( + def flush( # type: ignore[override] self, timeout: Optional[float] = None, callback: Optional[Callable[[int, float], None]] = None, - ) -> Optional[asyncio.Task[None]]: # type: ignore[override] + ) -> Optional[asyncio.Task[None]]: """ Wait for the current events to be sent. When using the async transport, flush needs to be awaited to block. From 9dd546c6d96ed07c4c95506c0855bda8ca16393c Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 09:43:35 +0200 Subject: [PATCH 041/102] ref(client): Add event loop handling to client flush/close Add event loop checks to client flush/close in async, in order to ensure silent failing instead of runtime error propagation/crashing. GH-4601 --- sentry_sdk/client.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index ab791b069c..4f8afc0555 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -941,9 +941,13 @@ def close( # type: ignore[override] def _on_flush_done(_: asyncio.Task[None]) -> None: self._close_components() - flush_task = self.transport.loop.create_task( - self._flush_async(timeout, callback) - ) + try: + flush_task = self.transport.loop.create_task( + self._flush_async(timeout, callback) + ) + except RuntimeError: + logger.warning("Event loop not running, aborting close.") + return None # Enforce flush before shutdown flush_task.add_done_callback(_on_flush_done) return flush_task @@ -975,9 +979,13 @@ def flush( # type: ignore[override] self.log_batcher.flush() if isinstance(self.transport, AsyncHttpTransport): - return self.transport.loop.create_task( - self._flush_async(timeout, callback) - ) + try: + return self.transport.loop.create_task( + self._flush_async(timeout, callback) + ) + except RuntimeError: + logger.warning("Event loop not running, aborting flush.") + return None else: 
self.transport.flush(timeout=timeout, callback=callback) return None From f4ac157c7a92ee63c3c5ddb57ab921dfb59dce60 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:13:33 +0200 Subject: [PATCH 042/102] ref(client): Moved close done callback into async task Moved the logic of the done callback directly into the async task to provide a cleaner and simpler control flow. GH-4601 --- sentry_sdk/client.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 4f8afc0555..1996247a8c 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -935,21 +935,24 @@ def close( # type: ignore[override] Close the client and shut down the transport. Arguments have the same semantics as :py:meth:`Client.flush`. When using the async transport, close needs to be awaited to block. """ + + async def _flush_and_close( + timeout: Optional[float], callback: Optional[Callable[[int, float], None]] + ) -> None: + await self._flush_async(timeout=timeout, callback=callback) + self._close_components() + if self.transport is not None: if isinstance(self.transport, AsyncHttpTransport): - def _on_flush_done(_: asyncio.Task[None]) -> None: - self._close_components() - try: flush_task = self.transport.loop.create_task( - self._flush_async(timeout, callback) + _flush_and_close(timeout, callback) ) except RuntimeError: logger.warning("Event loop not running, aborting close.") return None # Enforce flush before shutdown - flush_task.add_done_callback(_on_flush_done) return flush_task else: self.flush(timeout=timeout, callback=callback) From ed392e939c50e038cdea9053a83f90e6971976c7 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 11:30:45 +0200 Subject: [PATCH 043/102] ref(client): Move timeout check in client to properly cover async GH-4601 --- sentry_sdk/client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 1996247a8c..22ee4304b2 100644 
--- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -939,6 +939,7 @@ def close( # type: ignore[override] async def _flush_and_close( timeout: Optional[float], callback: Optional[Callable[[int, float], None]] ) -> None: + await self._flush_async(timeout=timeout, callback=callback) self._close_components() @@ -997,6 +998,9 @@ async def _flush_async( self, timeout: Optional[float], callback: Optional[Callable[[int, float], None]] ) -> None: + if timeout is None: + timeout = self.options["shutdown_timeout"] + self.session_flusher.flush() if self.log_batcher is not None: self.log_batcher.flush() From 50553d4f3f7f41f5a056012fe03b68eefc6eac61 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:07:48 +0200 Subject: [PATCH 044/102] feat(asyncio): Add patching for loop.close in asyncio GH-4601 --- sentry_sdk/integrations/asyncio.py | 45 ++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/sentry_sdk/integrations/asyncio.py b/sentry_sdk/integrations/asyncio.py index 4f44983e61..75e7a5f2d4 100644 --- a/sentry_sdk/integrations/asyncio.py +++ b/sentry_sdk/integrations/asyncio.py @@ -5,6 +5,7 @@ from sentry_sdk.consts import OP from sentry_sdk.integrations import Integration, DidNotEnable from sentry_sdk.utils import event_from_exception, logger, reraise +from sentry_sdk.transport import AsyncHttpTransport try: import asyncio @@ -124,3 +125,47 @@ class AsyncioIntegration(Integration): @staticmethod def setup_once() -> None: patch_asyncio() + + def _patch_loop_close() -> None: + # Atexit shutdown hook happens after the event loop is closed. + # Therefore, it is necessary to patch the loop.close method to ensure + # that pending events are flushed before the interpreter shuts down. 
+ try: + loop = asyncio.get_running_loop() + except RuntimeError: + # No running loop → cannot patch now + return + + if getattr(loop, "_sentry_flush_patched", False): + return + + async def _flush() -> None: + client = sentry_sdk.get_client() + if not client: + return + try: + + if not isinstance(client.transport, AsyncHttpTransport): + return + + t = client.close() + if t is not None: + # Wait for the task to complete. + await t # type: ignore + except Exception: + logger.warning( + "Sentry flush failed during loop shutdown", exc_info=True + ) + + orig_close = loop.close + + def _patched_close() -> None: + try: + loop.run_until_complete(_flush()) + finally: + orig_close() + + loop.close = _patched_close + loop._sentry_flush_patched = True # type: ignore + + _patch_loop_close() From 4a7b8ce784cb4ab3863577aeaecad30492af153a Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:14:25 +0200 Subject: [PATCH 045/102] ref(asyncio): Fix mypy type annotation errors Gh-4601 --- sentry_sdk/integrations/asyncio.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/integrations/asyncio.py b/sentry_sdk/integrations/asyncio.py index 75e7a5f2d4..9a66078d5e 100644 --- a/sentry_sdk/integrations/asyncio.py +++ b/sentry_sdk/integrations/asyncio.py @@ -148,10 +148,9 @@ async def _flush() -> None: if not isinstance(client.transport, AsyncHttpTransport): return - t = client.close() + t = client.close() # type: ignore if t is not None: - # Wait for the task to complete. 
- await t # type: ignore + await t except Exception: logger.warning( "Sentry flush failed during loop shutdown", exc_info=True @@ -165,7 +164,7 @@ def _patched_close() -> None: finally: orig_close() - loop.close = _patched_close + loop.close = _patched_close # type: ignore loop._sentry_flush_patched = True # type: ignore _patch_loop_close() From cd8a35fba4da9201ae7743ca440ad62d62b96f21 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:30:07 +0200 Subject: [PATCH 046/102] ref(client): Fix redundant async flush helper flushes GH-4601 --- sentry_sdk/client.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 22ee4304b2..4e1438ebd9 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -951,6 +951,7 @@ async def _flush_and_close( _flush_and_close(timeout, callback) ) except RuntimeError: + self._close_components() logger.warning("Event loop not running, aborting close.") return None # Enforce flush before shutdown @@ -975,12 +976,6 @@ def flush( # type: ignore[override] if self.transport is not None: if timeout is None: timeout = self.options["shutdown_timeout"] - assert timeout is not None # shutdown_timeout should never be None - - self.session_flusher.flush() - - if self.log_batcher is not None: - self.log_batcher.flush() if isinstance(self.transport, AsyncHttpTransport): try: @@ -991,6 +986,11 @@ def flush( # type: ignore[override] logger.warning("Event loop not running, aborting flush.") return None else: + self.session_flusher.flush() + + if self.log_batcher is not None: + self.log_batcher.flush() + self.transport.flush(timeout=timeout, callback=callback) return None From b9f2ec7c3c7c372fd6b149468cecc23579557006 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:33:16 +0200 Subject: [PATCH 047/102] ref(client): remove wrongful indent GH-4601 --- sentry_sdk/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/sentry_sdk/client.py b/sentry_sdk/client.py index 4e1438ebd9..a51c3f2da5 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -991,7 +991,7 @@ def flush( # type: ignore[override] if self.log_batcher is not None: self.log_batcher.flush() - self.transport.flush(timeout=timeout, callback=callback) + self.transport.flush(timeout=timeout, callback=callback) return None async def _flush_async( From 98d74edacef2a4be26e407008ec130c6f4331952 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 15:06:14 +0200 Subject: [PATCH 048/102] test(transport): Add initial transport tests GH-4601 --- requirements-testing.txt | 2 +- tests/test_transport.py | 87 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/requirements-testing.txt b/requirements-testing.txt index 7014f49137..5ec7a9fa3c 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -12,7 +12,7 @@ asttokens responses pysocks socksio -httpcore[http2] +httpcore[http2,asyncio]>=1.0 # From 1.0, httpcore async is optional setuptools freezegun Brotli diff --git a/tests/test_transport.py b/tests/test_transport.py index bd87728962..4cd4d9dde7 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -28,6 +28,7 @@ from sentry_sdk.transport import ( KEEP_ALIVE_SOCKET_OPTIONS, _parse_rate_limits, + AsyncHttpTransport, ) from sentry_sdk.integrations.logging import LoggingIntegration, ignore_logger @@ -146,6 +147,92 @@ def test_transport_works( assert any("Sending envelope" in record.msg for record in caplog.records) == debug +@pytest.mark.asyncio +@pytest.mark.parametrize("debug", (True, False)) +@pytest.mark.parametrize("client_flush_method", ["close", "flush"]) +@pytest.mark.parametrize("use_pickle", (True, False)) +@pytest.mark.parametrize("compression_level", (0, 9, None)) +@pytest.mark.parametrize("compression_algo", ("gzip", "br", "", None)) +@pytest.mark.parametrize("http2", [True, False] if PY38 else [False]) +async def 
test_transport_works_async( + capturing_server, + request, + capsys, + caplog, + debug, + make_client, + client_flush_method, + use_pickle, + compression_level, + compression_algo, + http2, +): + caplog.set_level(logging.DEBUG) + + experiments = {} + if compression_level is not None: + experiments["transport_compression_level"] = compression_level + + if compression_algo is not None: + experiments["transport_compression_algo"] = compression_algo + + if http2: + experiments["transport_http2"] = True + + # Enable async transport + experiments["transport_async"] = True + + client = make_client( + debug=debug, + _experiments=experiments, + ) + + if use_pickle: + client = pickle.loads(pickle.dumps(client)) + + # Verify we're using async transport + assert isinstance( + client.transport, AsyncHttpTransport + ), "Expected AsyncHttpTransport" + + sentry_sdk.get_global_scope().set_client(client) + request.addfinalizer(lambda: sentry_sdk.get_global_scope().set_client(None)) + + add_breadcrumb( + level="info", message="i like bread", timestamp=datetime.now(timezone.utc) + ) + capture_message("löl") + + if client_flush_method == "close": + await client.close(timeout=2.0) + else: + if hasattr(client, "_flush_async"): + await client._flush_async(timeout=2.0, callback=None) + # Need to kill, as the end of the test will close the event loop, but the worker task is still alive + client.transport._worker.kill() + + out, err = capsys.readouterr() + assert not err and not out + assert capturing_server.captured + should_compress = ( + # default is to compress with brotli if available, gzip otherwise + (compression_level is None) + or ( + # setting compression level to 0 means don't compress + compression_level + > 0 + ) + ) and ( + # if we couldn't resolve to a known algo, we don't compress + compression_algo + != "" + ) + + assert capturing_server.captured[0].compressed == should_compress + + assert any("Sending envelope" in record.msg for record in caplog.records) == debug + + 
@pytest.mark.parametrize( "num_pools,expected_num_pools", ( From 9df5ec5014d6139512544936612657d589935ab9 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 15:12:22 +0200 Subject: [PATCH 049/102] fix(requirements): Fix requirements for async transport testing on python versions GH-4601 --- requirements-testing.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements-testing.txt b/requirements-testing.txt index 5ec7a9fa3c..4766e759c6 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -12,7 +12,8 @@ asttokens responses pysocks socksio -httpcore[http2,asyncio]>=1.0 # From 1.0, httpcore async is optional +httpcore[http2]; python_version < "3.8" +httpcore[http2,asyncio]>=1.0; python_version >= "3.8" # asyncio support only supported for Python 3.8+ setuptools freezegun Brotli From 23d874057c7da42dc29659946e23577a53146273 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 15:19:56 +0200 Subject: [PATCH 050/102] fix(dependencies): Remove version constraint from httpcore GH-4601 --- requirements-testing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-testing.txt b/requirements-testing.txt index 4766e759c6..3f91ba324c 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -13,7 +13,7 @@ responses pysocks socksio httpcore[http2]; python_version < "3.8" -httpcore[http2,asyncio]>=1.0; python_version >= "3.8" # asyncio support only supported for Python 3.8+ +httpcore[http2,asyncio]; python_version >= "3.8" # asyncio support only supported for Python 3.8+ setuptools freezegun Brotli From a496787a95e62526c2e6212ee2122e4dac1aff83 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 15:39:08 +0200 Subject: [PATCH 051/102] fix(dependencies): Version guards for correct httpcore version GH-4601 --- requirements-testing.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-testing.txt b/requirements-testing.txt index 
3f91ba324c..993a42a99e 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -12,8 +12,8 @@ asttokens responses pysocks socksio -httpcore[http2]; python_version < "3.8" -httpcore[http2,asyncio]; python_version >= "3.8" # asyncio support only supported for Python 3.8+ +httpcore[http2]<1.0; python_version < "3.8" +httpcore[http2,asyncio]>=1.0; python_version >= "3.8" # asyncio support only supported for Python 3.8+ setuptools freezegun Brotli From 32a9abd8bb07eb7a3ce87233afecb6d4236ee21b Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 16:13:23 +0200 Subject: [PATCH 052/102] ref(tox): remove anyio version pin GH-4601 --- tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/tox.ini b/tox.ini index 8a3350440c..c08471bf45 100644 --- a/tox.ini +++ b/tox.ini @@ -376,7 +376,6 @@ deps = httpx-v0.25: pytest-httpx==0.25.0 httpx: pytest-httpx # anyio is a dep of httpx - httpx: anyio<4.0.0 httpx-v0.16: httpx~=0.16.0 httpx-v0.18: httpx~=0.18.0 httpx-v0.20: httpx~=0.20.0 From a69f7bbea6f07d7c006dbf59904856694daf5690 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 16:50:08 +0200 Subject: [PATCH 053/102] Revert "ref(tox): remove anyio version pin" This reverts commit b7e32a8723e08bad5ea902b9471024f3b3413015. 
--- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index c08471bf45..8a3350440c 100644 --- a/tox.ini +++ b/tox.ini @@ -376,6 +376,7 @@ deps = httpx-v0.25: pytest-httpx==0.25.0 httpx: pytest-httpx # anyio is a dep of httpx + httpx: anyio<4.0.0 httpx-v0.16: httpx~=0.16.0 httpx-v0.18: httpx~=0.18.0 httpx-v0.20: httpx~=0.20.0 From c80b0950ab64ede0b6352f3ce2f712eea0f19d37 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 16:51:46 +0200 Subject: [PATCH 054/102] ref(dependencies): Revert dependency changes Revert dependency changes that broke existing tests GH-4601 --- requirements-testing.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements-testing.txt b/requirements-testing.txt index 993a42a99e..7014f49137 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -12,8 +12,7 @@ asttokens responses pysocks socksio -httpcore[http2]<1.0; python_version < "3.8" -httpcore[http2,asyncio]>=1.0; python_version >= "3.8" # asyncio support only supported for Python 3.8+ +httpcore[http2] setuptools freezegun Brotli From 59049681a00ba63bf13584804f0cc1c7008ab042 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 17:18:56 +0200 Subject: [PATCH 055/102] ref(test): Remove flush from async transport test Flush in async only flushes the worker, but does not cancel the task. This leads to errors, as the end of the test closes the event loop, but the worker task is still running. 
GH-4601 --- tests/test_transport.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/tests/test_transport.py b/tests/test_transport.py index 4cd4d9dde7..fac8bb0f89 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -149,11 +149,10 @@ def test_transport_works( @pytest.mark.asyncio @pytest.mark.parametrize("debug", (True, False)) -@pytest.mark.parametrize("client_flush_method", ["close", "flush"]) +@pytest.mark.parametrize("client_flush_method", ["close"]) @pytest.mark.parametrize("use_pickle", (True, False)) @pytest.mark.parametrize("compression_level", (0, 9, None)) @pytest.mark.parametrize("compression_algo", ("gzip", "br", "", None)) -@pytest.mark.parametrize("http2", [True, False] if PY38 else [False]) async def test_transport_works_async( capturing_server, request, @@ -165,7 +164,6 @@ async def test_transport_works_async( use_pickle, compression_level, compression_algo, - http2, ): caplog.set_level(logging.DEBUG) @@ -176,9 +174,6 @@ async def test_transport_works_async( if compression_algo is not None: experiments["transport_compression_algo"] = compression_algo - if http2: - experiments["transport_http2"] = True - # Enable async transport experiments["transport_async"] = True @@ -205,11 +200,6 @@ async def test_transport_works_async( if client_flush_method == "close": await client.close(timeout=2.0) - else: - if hasattr(client, "_flush_async"): - await client._flush_async(timeout=2.0, callback=None) - # Need to kill, as the end of the test will close the event loop, but the worker task is still alive - client.transport._worker.kill() out, err = capsys.readouterr() assert not err and not out From 09034b7b7717c0cb4301a3108cf19180cb9f7d42 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 17:20:50 +0200 Subject: [PATCH 056/102] ref(client): Remove wrongful indents in client sync flush GH-4601 --- sentry_sdk/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/sentry_sdk/client.py b/sentry_sdk/client.py index a51c3f2da5..14d05addaa 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -988,10 +988,10 @@ def flush( # type: ignore[override] else: self.session_flusher.flush() - if self.log_batcher is not None: - self.log_batcher.flush() + if self.log_batcher is not None: + self.log_batcher.flush() - self.transport.flush(timeout=timeout, callback=callback) + self.transport.flush(timeout=timeout, callback=callback) return None async def _flush_async( From 21b1cdaca9748ff5083a4304350cb6c0b33dc191 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 10:18:21 +0200 Subject: [PATCH 057/102] fix(testing): Changed httpx anyio version pin to >=3, <5 The previous version pin of httpx anyio was incompatible with httpcore's asyncio extension, therefore it is necessary to modify it to allow the dependencies to be resolved. GH-4601 --- requirements-testing.txt | 2 +- scripts/populate_tox/config.py | 2 +- tox.ini | 78 ++++++++++++++++++---------------- 3 files changed, 43 insertions(+), 39 deletions(-) diff --git a/requirements-testing.txt b/requirements-testing.txt index 7014f49137..ed3c7ca79a 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -12,7 +12,7 @@ asttokens responses pysocks socksio -httpcore[http2] +httpcore[http2,asyncio] setuptools freezegun Brotli diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py index 7e48d620c0..dd5b163a82 100644 --- a/scripts/populate_tox/config.py +++ b/scripts/populate_tox/config.py @@ -96,7 +96,7 @@ "pytest-asyncio", "python-multipart", "requests", - "anyio<4", + "anyio>=3,<5", ], # There's an incompatibility between FastAPI's TestClient, which is # actually Starlette's TestClient, which is actually httpx's Client. diff --git a/tox.ini b/tox.ini index 8a3350440c..2d6d46cc01 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". 
# -# Last generated: 2025-07-15T08:55:25.785747+00:00 +# Last generated: 2025-07-29T08:10:08.981379+00:00 [tox] requires = @@ -132,9 +132,9 @@ envlist = # ~~~ AI ~~~ {py3.8,py3.11,py3.12}-anthropic-v0.16.0 - {py3.8,py3.11,py3.12}-anthropic-v0.30.1 - {py3.8,py3.11,py3.12}-anthropic-v0.44.0 - {py3.8,py3.11,py3.12}-anthropic-v0.57.1 + {py3.8,py3.11,py3.12}-anthropic-v0.31.2 + {py3.8,py3.11,py3.12}-anthropic-v0.46.0 + {py3.8,py3.12,py3.13}-anthropic-v0.60.0 {py3.9,py3.10,py3.11}-cohere-v5.4.0 {py3.9,py3.11,py3.12}-cohere-v5.9.4 @@ -143,11 +143,13 @@ envlist = {py3.9,py3.11,py3.12}-openai_agents-v0.0.19 {py3.9,py3.12,py3.13}-openai_agents-v0.1.0 + {py3.9,py3.12,py3.13}-openai_agents-v0.2.3 {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 {py3.8,py3.12,py3.13}-huggingface_hub-v0.30.2 - {py3.8,py3.12,py3.13}-huggingface_hub-v0.33.4 + {py3.8,py3.12,py3.13}-huggingface_hub-v0.34.2 + {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.0rc0 # ~~~ DBs ~~~ @@ -176,9 +178,9 @@ envlist = {py3.9,py3.12,py3.13}-openfeature-v0.8.1 {py3.7,py3.12,py3.13}-statsig-v0.55.3 - {py3.7,py3.12,py3.13}-statsig-v0.56.0 {py3.7,py3.12,py3.13}-statsig-v0.57.3 - {py3.7,py3.12,py3.13}-statsig-v0.59.0 + {py3.7,py3.12,py3.13}-statsig-v0.59.1 + {py3.7,py3.12,py3.13}-statsig-v0.61.0 {py3.8,py3.12,py3.13}-unleash-v6.0.1 {py3.8,py3.12,py3.13}-unleash-v6.1.0 @@ -200,17 +202,16 @@ envlist = {py3.8,py3.12,py3.13}-graphene-v3.4.3 {py3.8,py3.10,py3.11}-strawberry-v0.209.8 - {py3.8,py3.11,py3.12}-strawberry-v0.231.1 - {py3.8,py3.12,py3.13}-strawberry-v0.253.1 - {py3.9,py3.12,py3.13}-strawberry-v0.276.0 + {py3.8,py3.11,py3.12}-strawberry-v0.232.2 + {py3.8,py3.12,py3.13}-strawberry-v0.255.0 + {py3.9,py3.12,py3.13}-strawberry-v0.278.0 # ~~~ Network ~~~ {py3.7,py3.8}-grpc-v1.32.0 {py3.7,py3.9,py3.10}-grpc-v1.46.5 {py3.7,py3.11,py3.12}-grpc-v1.60.2 - {py3.9,py3.12,py3.13}-grpc-v1.73.1 - {py3.9,py3.12,py3.13}-grpc-v1.74.0rc1 + {py3.9,py3.12,py3.13}-grpc-v1.74.0 # ~~~ Tasks ~~~ @@ 
-249,7 +250,7 @@ envlist = {py3.7,py3.9,py3.10}-starlette-v0.16.0 {py3.7,py3.10,py3.11}-starlette-v0.26.1 {py3.8,py3.11,py3.12}-starlette-v0.36.3 - {py3.9,py3.12,py3.13}-starlette-v0.47.1 + {py3.9,py3.12,py3.13}-starlette-v0.47.2 {py3.7,py3.9,py3.10}-fastapi-v0.79.1 {py3.7,py3.10,py3.11}-fastapi-v0.91.0 @@ -261,7 +262,7 @@ envlist = {py3.7}-aiohttp-v3.4.4 {py3.7,py3.8,py3.9}-aiohttp-v3.7.4 {py3.8,py3.12,py3.13}-aiohttp-v3.10.11 - {py3.9,py3.12,py3.13}-aiohttp-v3.12.14 + {py3.9,py3.12,py3.13}-aiohttp-v3.12.15 {py3.7}-bottle-v0.12.25 {py3.8,py3.12,py3.13}-bottle-v0.13.4 @@ -269,6 +270,7 @@ envlist = {py3.7,py3.8,py3.9}-falcon-v3.0.1 {py3.7,py3.11,py3.12}-falcon-v3.1.3 {py3.8,py3.11,py3.12}-falcon-v4.0.2 + {py3.8,py3.11,py3.12}-falcon-v4.1.0a3 {py3.8,py3.10,py3.11}-litestar-v2.0.1 {py3.8,py3.11,py3.12}-litestar-v2.5.5 @@ -295,8 +297,8 @@ envlist = {py3.7}-trytond-v5.0.63 {py3.7,py3.8}-trytond-v5.8.16 {py3.8,py3.10,py3.11}-trytond-v6.8.17 - {py3.8,py3.11,py3.12}-trytond-v7.0.33 - {py3.9,py3.12,py3.13}-trytond-v7.6.3 + {py3.8,py3.11,py3.12}-trytond-v7.0.34 + {py3.9,py3.12,py3.13}-trytond-v7.6.4 {py3.7,py3.12,py3.13}-typer-v0.15.4 {py3.7,py3.12,py3.13}-typer-v0.16.0 @@ -493,13 +495,13 @@ deps = # ~~~ AI ~~~ anthropic-v0.16.0: anthropic==0.16.0 - anthropic-v0.30.1: anthropic==0.30.1 - anthropic-v0.44.0: anthropic==0.44.0 - anthropic-v0.57.1: anthropic==0.57.1 + anthropic-v0.31.2: anthropic==0.31.2 + anthropic-v0.46.0: anthropic==0.46.0 + anthropic-v0.60.0: anthropic==0.60.0 anthropic: pytest-asyncio anthropic-v0.16.0: httpx<0.28.0 - anthropic-v0.30.1: httpx<0.28.0 - anthropic-v0.44.0: httpx<0.28.0 + anthropic-v0.31.2: httpx<0.28.0 + anthropic-v0.46.0: httpx<0.28.0 cohere-v5.4.0: cohere==5.4.0 cohere-v5.9.4: cohere==5.9.4 @@ -508,12 +510,14 @@ deps = openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 + openai_agents-v0.2.3: openai-agents==0.2.3 openai_agents: pytest-asyncio huggingface_hub-v0.22.2: huggingface_hub==0.22.2 
huggingface_hub-v0.26.5: huggingface_hub==0.26.5 huggingface_hub-v0.30.2: huggingface_hub==0.30.2 - huggingface_hub-v0.33.4: huggingface_hub==0.33.4 + huggingface_hub-v0.34.2: huggingface_hub==0.34.2 + huggingface_hub-v0.35.0rc0: huggingface_hub==0.35.0rc0 # ~~~ DBs ~~~ @@ -543,9 +547,9 @@ deps = openfeature-v0.8.1: openfeature-sdk==0.8.1 statsig-v0.55.3: statsig==0.55.3 - statsig-v0.56.0: statsig==0.56.0 statsig-v0.57.3: statsig==0.57.3 - statsig-v0.59.0: statsig==0.59.0 + statsig-v0.59.1: statsig==0.59.1 + statsig-v0.61.0: statsig==0.61.0 statsig: typing_extensions unleash-v6.0.1: UnleashClient==6.0.1 @@ -576,21 +580,20 @@ deps = py3.6-graphene: aiocontextvars strawberry-v0.209.8: strawberry-graphql[fastapi,flask]==0.209.8 - strawberry-v0.231.1: strawberry-graphql[fastapi,flask]==0.231.1 - strawberry-v0.253.1: strawberry-graphql[fastapi,flask]==0.253.1 - strawberry-v0.276.0: strawberry-graphql[fastapi,flask]==0.276.0 + strawberry-v0.232.2: strawberry-graphql[fastapi,flask]==0.232.2 + strawberry-v0.255.0: strawberry-graphql[fastapi,flask]==0.255.0 + strawberry-v0.278.0: strawberry-graphql[fastapi,flask]==0.278.0 strawberry: httpx strawberry-v0.209.8: pydantic<2.11 - strawberry-v0.231.1: pydantic<2.11 - strawberry-v0.253.1: pydantic<2.11 + strawberry-v0.232.2: pydantic<2.11 + strawberry-v0.255.0: pydantic<2.11 # ~~~ Network ~~~ grpc-v1.32.0: grpcio==1.32.0 grpc-v1.46.5: grpcio==1.46.5 grpc-v1.60.2: grpcio==1.60.2 - grpc-v1.73.1: grpcio==1.73.1 - grpc-v1.74.0rc1: grpcio==1.74.0rc1 + grpc-v1.74.0: grpcio==1.74.0 grpc: protobuf grpc: mypy-protobuf grpc: types-protobuf @@ -663,7 +666,7 @@ deps = starlette-v0.16.0: starlette==0.16.0 starlette-v0.26.1: starlette==0.26.1 starlette-v0.36.3: starlette==0.36.3 - starlette-v0.47.1: starlette==0.47.1 + starlette-v0.47.2: starlette==0.47.2 starlette: pytest-asyncio starlette: python-multipart starlette: requests @@ -683,7 +686,7 @@ deps = fastapi: pytest-asyncio fastapi: python-multipart fastapi: requests - fastapi: anyio<4 + 
fastapi: anyio>=3,<5 fastapi-v0.79.1: httpx<0.28.0 fastapi-v0.91.0: httpx<0.28.0 fastapi-v0.103.2: httpx<0.28.0 @@ -694,10 +697,10 @@ deps = aiohttp-v3.4.4: aiohttp==3.4.4 aiohttp-v3.7.4: aiohttp==3.7.4 aiohttp-v3.10.11: aiohttp==3.10.11 - aiohttp-v3.12.14: aiohttp==3.12.14 + aiohttp-v3.12.15: aiohttp==3.12.15 aiohttp: pytest-aiohttp aiohttp-v3.10.11: pytest-asyncio - aiohttp-v3.12.14: pytest-asyncio + aiohttp-v3.12.15: pytest-asyncio bottle-v0.12.25: bottle==0.12.25 bottle-v0.13.4: bottle==0.13.4 @@ -706,6 +709,7 @@ deps = falcon-v3.0.1: falcon==3.0.1 falcon-v3.1.3: falcon==3.1.3 falcon-v4.0.2: falcon==4.0.2 + falcon-v4.1.0a3: falcon==4.1.0a3 litestar-v2.0.1: litestar==2.0.1 litestar-v2.5.5: litestar==2.5.5 @@ -749,8 +753,8 @@ deps = trytond-v5.0.63: trytond==5.0.63 trytond-v5.8.16: trytond==5.8.16 trytond-v6.8.17: trytond==6.8.17 - trytond-v7.0.33: trytond==7.0.33 - trytond-v7.6.3: trytond==7.6.3 + trytond-v7.0.34: trytond==7.0.34 + trytond-v7.6.4: trytond==7.6.4 trytond: werkzeug trytond-v5.0.63: werkzeug<1.0 From 9d0cde4f28be3601e59efc3ad594f4025dd68442 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 10:27:43 +0200 Subject: [PATCH 058/102] fix(test): Properly modify httpx anyio pin Previous commit did not properly modify the version pin to the new required one for httpcore asyncio GH-4601 --- scripts/populate_tox/tox.jinja | 2 +- tox.ini | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index 66b1d7885a..514566ea46 100644 --- a/scripts/populate_tox/tox.jinja +++ b/scripts/populate_tox/tox.jinja @@ -207,7 +207,7 @@ deps = httpx-v0.25: pytest-httpx==0.25.0 httpx: pytest-httpx # anyio is a dep of httpx - httpx: anyio<4.0.0 + httpx: anyio>=3,<5 httpx-v0.16: httpx~=0.16.0 httpx-v0.18: httpx~=0.18.0 httpx-v0.20: httpx~=0.20.0 diff --git a/tox.ini b/tox.ini index 2d6d46cc01..b763dd9b48 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI 
YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". # -# Last generated: 2025-07-29T08:10:08.981379+00:00 +# Last generated: 2025-07-29T08:26:02.455474+00:00 [tox] requires = @@ -378,7 +378,7 @@ deps = httpx-v0.25: pytest-httpx==0.25.0 httpx: pytest-httpx # anyio is a dep of httpx - httpx: anyio<4.0.0 + httpx: anyio>=3,<5 httpx-v0.16: httpx~=0.16.0 httpx-v0.18: httpx~=0.18.0 httpx-v0.20: httpx~=0.20.0 From f21e2ea1c03835ab06580b28c52316c769dd2a39 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 10:47:03 +0200 Subject: [PATCH 059/102] fix(test): Add fastapi anyio pin for <0.8 Anyio >=4.0 breaks fastapi for versions <0.8 GH-4601 --- scripts/populate_tox/config.py | 1 + tox.ini | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py index dd5b163a82..a4efb452de 100644 --- a/scripts/populate_tox/config.py +++ b/scripts/populate_tox/config.py @@ -106,6 +106,7 @@ # FastAPI versions we use older httpx which still supports the # deprecated argument. "<0.110.1": ["httpx<0.28.0"], + "<0.80": ["anyio<4"], "py3.6": ["aiocontextvars"], }, }, diff --git a/tox.ini b/tox.ini index b763dd9b48..4779e22556 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". 
# -# Last generated: 2025-07-29T08:26:02.455474+00:00 +# Last generated: 2025-07-29T08:45:43.951088+00:00 [tox] requires = @@ -148,7 +148,7 @@ envlist = {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 {py3.8,py3.12,py3.13}-huggingface_hub-v0.30.2 - {py3.8,py3.12,py3.13}-huggingface_hub-v0.34.2 + {py3.8,py3.12,py3.13}-huggingface_hub-v0.34.3 {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.0rc0 @@ -516,7 +516,7 @@ deps = huggingface_hub-v0.22.2: huggingface_hub==0.22.2 huggingface_hub-v0.26.5: huggingface_hub==0.26.5 huggingface_hub-v0.30.2: huggingface_hub==0.30.2 - huggingface_hub-v0.34.2: huggingface_hub==0.34.2 + huggingface_hub-v0.34.3: huggingface_hub==0.34.3 huggingface_hub-v0.35.0rc0: huggingface_hub==0.35.0rc0 @@ -690,6 +690,7 @@ deps = fastapi-v0.79.1: httpx<0.28.0 fastapi-v0.91.0: httpx<0.28.0 fastapi-v0.103.2: httpx<0.28.0 + fastapi-v0.79.1: anyio<4 py3.6-fastapi: aiocontextvars From 76aae83a0de74f990da6e0f32ac98dbdf82cac96 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 12:10:28 +0200 Subject: [PATCH 060/102] feat(test): Add tests for specific async transport functionality Add tests that test specific async functionality for the async transport, such as concurrency. 
GH-4601 --- tests/test_transport.py | 118 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/tests/test_transport.py b/tests/test_transport.py index fac8bb0f89..7f4a2e2159 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -3,6 +3,8 @@ import os import socket import sys +import asyncio +import threading from collections import defaultdict from datetime import datetime, timedelta, timezone from unittest import mock @@ -153,6 +155,7 @@ def test_transport_works( @pytest.mark.parametrize("use_pickle", (True, False)) @pytest.mark.parametrize("compression_level", (0, 9, None)) @pytest.mark.parametrize("compression_algo", ("gzip", "br", "", None)) +@pytest.mark.skipif(not PY38, reason="Async transport only supported in Python 3.8+") async def test_transport_works_async( capturing_server, request, @@ -753,3 +756,118 @@ def close(self): client.flush() assert seen == ["status_500"] + + +@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def test_async_transport_background_thread_capture( + capturing_server, make_client, caplog +): + """Test capture_envelope from background threads uses run_coroutine_threadsafe""" + caplog.set_level(logging.DEBUG) + experiments = {"transport_async": True} + client = make_client(_experiments=experiments) + assert isinstance(client.transport, AsyncHttpTransport) + sentry_sdk.get_global_scope().set_client(client) + captured_from_thread = [] + exception_from_thread = [] + + def background_thread_work(): + try: + # This should use run_coroutine_threadsafe path + capture_message("from background thread") + captured_from_thread.append(True) + except Exception as e: + exception_from_thread.append(e) + + thread = threading.Thread(target=background_thread_work) + thread.start() + thread.join() + assert not exception_from_thread + assert captured_from_thread + await client.close(timeout=2.0) + assert capturing_server.captured + + 
+@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def test_async_transport_event_loop_closed_scenario( + capturing_server, make_client, caplog +): + """Test behavior when trying to capture after event loop context ends""" + caplog.set_level(logging.DEBUG) + experiments = {"transport_async": True} + client = make_client(_experiments=experiments) + sentry_sdk.get_global_scope().set_client(client) + original_loop = client.transport.loop + + with mock.patch("asyncio.get_running_loop", side_effect=RuntimeError("no loop")): + with mock.patch.object(client.transport.loop, "is_running", return_value=False): + with mock.patch("sentry_sdk.transport.logger") as mock_logger: + # This should trigger the "no_async_context" path + capture_message("after loop closed") + + mock_logger.warning.assert_called_with( + "Async Transport is not running in an event loop." + ) + + client.transport.loop = original_loop + await client.close(timeout=2.0) + + +@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def test_async_transport_concurrent_requests( + capturing_server, make_client, caplog +): + """Test multiple simultaneous envelope submissions""" + caplog.set_level(logging.DEBUG) + experiments = {"transport_async": True} + client = make_client(_experiments=experiments) + assert isinstance(client.transport, AsyncHttpTransport) + sentry_sdk.get_global_scope().set_client(client) + + num_messages = 15 + + async def send_message(i): + capture_message(f"concurrent message {i}") + + tasks = [send_message(i) for i in range(num_messages)] + await asyncio.gather(*tasks) + transport = client.transport + await client.close(timeout=2.0) + assert len(transport.background_tasks) == 0 + assert len(capturing_server.captured) == num_messages + + +@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def 
test_async_transport_rate_limiting_with_concurrency( + capturing_server, make_client, request +): + """Test async transport rate limiting with concurrent requests""" + experiments = {"transport_async": True} + client = make_client(_experiments=experiments) + + assert isinstance(client.transport, AsyncHttpTransport) + sentry_sdk.get_global_scope().set_client(client) + request.addfinalizer(lambda: sentry_sdk.get_global_scope().set_client(None)) + capturing_server.respond_with( + code=429, headers={"X-Sentry-Rate-Limits": "60:error:organization"} + ) + + # Send one request first to trigger rate limiting + capture_message("initial message") + await asyncio.sleep(0.1) # Wait for request to execute + assert client.transport._check_disabled("error") is True + capturing_server.clear_captured() + + async def send_message(i): + capture_message(f"message {i}") + await asyncio.sleep(0.01) + + await asyncio.gather(*[send_message(i) for i in range(5)]) + await asyncio.sleep(0.1) + # New request should be dropped due to rate limiting + assert len(capturing_server.captured) == 0 + await client.close(timeout=2.0) From 4c1e99bd6595c3c179a2df9e93b3dd47aaa31ca0 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 12:13:49 +0200 Subject: [PATCH 061/102] feat(test): Add flush to async transport test Add flush to async transport test matrix. Additionally, explicitly shut down the worker task upon flushing in the tests, to avoid event loop errors. 
GH-4601 --- tests/test_transport.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/test_transport.py b/tests/test_transport.py index 7f4a2e2159..eaa49e0c15 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -151,7 +151,7 @@ def test_transport_works( @pytest.mark.asyncio @pytest.mark.parametrize("debug", (True, False)) -@pytest.mark.parametrize("client_flush_method", ["close"]) +@pytest.mark.parametrize("client_flush_method", ["close", "flush"]) @pytest.mark.parametrize("use_pickle", (True, False)) @pytest.mark.parametrize("compression_level", (0, 9, None)) @pytest.mark.parametrize("compression_algo", ("gzip", "br", "", None)) @@ -203,6 +203,8 @@ async def test_transport_works_async( if client_flush_method == "close": await client.close(timeout=2.0) + if client_flush_method == "flush": + await client.flush(timeout=2.0) out, err = capsys.readouterr() assert not err and not out @@ -222,8 +224,11 @@ async def test_transport_works_async( ) assert capturing_server.captured[0].compressed == should_compress - + # After flush, the worker task is still running, but the end of the test will shut down the event loop + # Therefore, we need to explicitly close the client to clean up the worker task assert any("Sending envelope" in record.msg for record in caplog.records) == debug + if client_flush_method == "flush": + await client.close(timeout=2.0) @pytest.mark.parametrize( From 6df70371ad9fbd3dc335f77593beed9eee633d42 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 13:34:19 +0200 Subject: [PATCH 062/102] ref(client): Adapt client for blocking kill in async transport Add blocking kill functionality to the client in async transport. 
GH-4601 --- sentry_sdk/client.py | 22 ++++++++++++++++++---- sentry_sdk/transport.py | 2 +- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 14d05addaa..9be1561002 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -915,6 +915,14 @@ def get_integration( return self.integrations.get(integration_name) + def _close_transport(self) -> Optional[asyncio.Task[None]]: + """Close transport and return cleanup task if any.""" + if self.transport is not None: + cleanup_task = self.transport.kill() + self.transport = None + return cleanup_task + return None + def _close_components(self) -> None: """Kill all client components in the correct order.""" self.session_flusher.kill() @@ -922,9 +930,13 @@ def _close_components(self) -> None: self.log_batcher.kill() if self.monitor: self.monitor.kill() - if self.transport is not None: - self.transport.kill() - self.transport = None + + async def _close_components_async(self) -> None: + """Async version of _close_components that properly awaits transport cleanup.""" + self._close_components() + cleanup_task = self._close_transport() + if cleanup_task is not None: + await cleanup_task def close( # type: ignore[override] self, @@ -941,7 +953,7 @@ async def _flush_and_close( ) -> None: await self._flush_async(timeout=timeout, callback=callback) - self._close_components() + await self._close_components_async() if self.transport is not None: if isinstance(self.transport, AsyncHttpTransport): @@ -959,6 +971,8 @@ async def _flush_and_close( else: self.flush(timeout=timeout, callback=callback) self._close_components() + self._close_transport() + return None def flush( # type: ignore[override] diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 16e6e49401..e6afd7d0a9 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -790,7 +790,7 @@ def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore self.background_tasks.clear() try: 
# Return the pool cleanup task so caller can await it if needed - return self._loop.create_task(self._pool.aclose()) # type: ignore + return self.loop.create_task(self._pool.aclose()) # type: ignore except RuntimeError: logger.warning("Event loop not running, aborting kill.") return None From 25c04fc5eecc557eaf34432916de133f8719154b Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 13:42:24 +0200 Subject: [PATCH 063/102] ref(client): Fix transport shutdown if loop is not running GH-4601 --- sentry_sdk/client.py | 4 +++- sentry_sdk/transport.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 9be1561002..a92ad263e0 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -918,7 +918,7 @@ def get_integration( def _close_transport(self) -> Optional[asyncio.Task[None]]: """Close transport and return cleanup task if any.""" if self.transport is not None: - cleanup_task = self.transport.kill() + cleanup_task = self.transport.kill() # type: ignore self.transport = None return cleanup_task return None @@ -963,7 +963,9 @@ async def _flush_and_close( _flush_and_close(timeout, callback) ) except RuntimeError: + # Shutdown the components anyway self._close_components() + self._close_transport() logger.warning("Event loop not running, aborting close.") return None # Enforce flush before shutdown diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index e6afd7d0a9..acc2bf15f8 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -781,7 +781,7 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) - def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore + def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore[override] logger.debug("Killing HTTP transport") self._worker.kill() From e8d889c3e142502165a3cfbb7497ffac0b1779c0 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 16:12:50 +0200 Subject: [PATCH 064/102] ref(test): 
Add config tests for SSL/proxy for async GH-4601 --- tests/test_client.py | 317 +++++++++++++++++++++++++++++++++++++++- tests/test_transport.py | 21 +++ 2 files changed, 337 insertions(+), 1 deletion(-) diff --git a/tests/test_client.py b/tests/test_client.py index 8290c8e575..049414b785 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,9 +23,10 @@ from sentry_sdk.spotlight import DEFAULT_SPOTLIGHT_URL from sentry_sdk.utils import capture_internal_exception from sentry_sdk.integrations.executing import ExecutingIntegration -from sentry_sdk.transport import Transport +from sentry_sdk.transport import Transport, AsyncHttpTransport from sentry_sdk.serializer import MAX_DATABAG_BREADTH from sentry_sdk.consts import DEFAULT_MAX_BREADCRUMBS, DEFAULT_MAX_VALUE_LENGTH +from sentry_sdk._compat import PY38 from typing import TYPE_CHECKING @@ -1498,3 +1499,317 @@ def test_keep_alive(env_value, arg_value, expected_value): ) assert transport_cls.options["keep_alive"] is expected_value + + +@pytest.mark.parametrize( + "testcase", + [ + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "arg_http_proxy": "http://localhost/123", + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "arg_http_proxy": "https://localhost/123", + "arg_https_proxy": None, + "expected_proxy_scheme": "https", + }, + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "arg_http_proxy": "http://localhost/123", + "arg_https_proxy": "https://localhost/123", + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "arg_http_proxy": "http://localhost/123", + "arg_https_proxy": "https://localhost/123", + "expected_proxy_scheme": "https", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + 
"env_https_proxy": None, + "arg_http_proxy": "http://localhost/123", + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": None, + }, + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": None, + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": "https", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": None, + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": "", + "arg_https_proxy": "", + "expected_proxy_scheme": None, + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": "https", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": None, + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": None, + "arg_https_proxy": "", + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": "", + "arg_https_proxy": None, + 
"expected_proxy_scheme": "https", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": None, + "arg_https_proxy": "", + "expected_proxy_scheme": None, + }, + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": "https://localhost/123", + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + # NO_PROXY testcases + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": "http://localhost/123", + "env_https_proxy": None, + "env_no_proxy": "sentry.io,example.com", + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": None, + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": "https://localhost/123", + "env_no_proxy": "example.com,sentry.io", + "arg_http_proxy": None, + "arg_https_proxy": None, + "expected_proxy_scheme": None, + }, + { + "dsn": "http://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "env_no_proxy": "sentry.io,example.com", + "arg_http_proxy": "http://localhost/123", + "arg_https_proxy": None, + "expected_proxy_scheme": "http", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "env_no_proxy": "sentry.io,example.com", + "arg_http_proxy": None, + "arg_https_proxy": "https://localhost/123", + "expected_proxy_scheme": "https", + }, + { + "dsn": "https://foo@sentry.io/123", + "env_http_proxy": None, + "env_https_proxy": None, + "env_no_proxy": "sentry.io,example.com", + "arg_http_proxy": None, + "arg_https_proxy": "https://localhost/123", + "expected_proxy_scheme": "https", + "arg_proxy_headers": {"Test-Header": "foo-bar"}, + }, + ], +) +@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def test_async_proxy(monkeypatch, testcase): + # These are just the same tests as the sync ones, but 
they need to be run in an event loop + # and respect the shutdown behavior of the async transport + if testcase["env_http_proxy"] is not None: + monkeypatch.setenv("HTTP_PROXY", testcase["env_http_proxy"]) + if testcase["env_https_proxy"] is not None: + monkeypatch.setenv("HTTPS_PROXY", testcase["env_https_proxy"]) + if testcase.get("env_no_proxy") is not None: + monkeypatch.setenv("NO_PROXY", testcase["env_no_proxy"]) + + kwargs = {"_experiments": {"transport_async": True}} + + if testcase["arg_http_proxy"] is not None: + kwargs["http_proxy"] = testcase["arg_http_proxy"] + if testcase["arg_https_proxy"] is not None: + kwargs["https_proxy"] = testcase["arg_https_proxy"] + if testcase.get("arg_proxy_headers") is not None: + kwargs["proxy_headers"] = testcase["arg_proxy_headers"] + + client = Client(testcase["dsn"], **kwargs) + assert isinstance(client.transport, AsyncHttpTransport) + + proxy = getattr( + client.transport._pool, + "proxy", + getattr(client.transport._pool, "_proxy_url", None), + ) + if testcase["expected_proxy_scheme"] is None: + assert proxy is None + else: + scheme = ( + proxy.scheme.decode("ascii") + if isinstance(proxy.scheme, bytes) + else proxy.scheme + ) + assert scheme == testcase["expected_proxy_scheme"] + + if testcase.get("arg_proxy_headers") is not None: + proxy_headers = dict( + (k.decode("ascii"), v.decode("ascii")) + for k, v in client.transport._pool._proxy_headers + ) + assert proxy_headers == testcase["arg_proxy_headers"] + + await client.close() + + +@pytest.mark.parametrize( + "testcase", + [ + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": "http://localhost/123", + "arg_https_proxy": None, + "should_be_socks_proxy": False, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": "socks4a://localhost/123", + "arg_https_proxy": None, + "should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": "socks4://localhost/123", + "arg_https_proxy": None, + 
"should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": "socks5h://localhost/123", + "arg_https_proxy": None, + "should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": "socks5://localhost/123", + "arg_https_proxy": None, + "should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": None, + "arg_https_proxy": "socks4a://localhost/123", + "should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": None, + "arg_https_proxy": "socks4://localhost/123", + "should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": None, + "arg_https_proxy": "socks5h://localhost/123", + "should_be_socks_proxy": True, + }, + { + "dsn": "https://foo@sentry.io/123", + "arg_http_proxy": None, + "arg_https_proxy": "socks5://localhost/123", + "should_be_socks_proxy": True, + }, + ], +) +@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def test_async_socks_proxy(testcase): + # These are just the same tests as the sync ones, but they need to be run in an event loop + # and respect the shutdown behavior of the async transport + + kwargs = {"_experiments": {"transport_async": True}} + + if testcase["arg_http_proxy"] is not None: + kwargs["http_proxy"] = testcase["arg_http_proxy"] + if testcase["arg_https_proxy"] is not None: + kwargs["https_proxy"] = testcase["arg_https_proxy"] + + client = Client(testcase["dsn"], **kwargs) + assert isinstance(client.transport, AsyncHttpTransport) + + assert ("socks" in str(type(client.transport._pool)).lower()) == testcase[ + "should_be_socks_proxy" + ], ( + f"Expected {kwargs} to result in SOCKS == {testcase['should_be_socks_proxy']}" + f"but got {str(type(client.transport._pool))}" + ) + + await client.close() diff --git a/tests/test_transport.py b/tests/test_transport.py index eaa49e0c15..f5a53e348d 100644 --- 
a/tests/test_transport.py +++ b/tests/test_transport.py @@ -876,3 +876,24 @@ async def send_message(i): # New request should be dropped due to rate limiting assert len(capturing_server.captured) == 0 await client.close(timeout=2.0) + + +@pytest.mark.asyncio +@pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") +async def test_async_two_way_ssl_authentication(): + current_dir = os.path.dirname(__file__) + cert_file = f"{current_dir}/test.pem" + key_file = f"{current_dir}/test.key" + + client = Client( + "https://foo@sentry.io/123", + cert_file=cert_file, + key_file=key_file, + _experiments={"transport_async": True}, + ) + assert isinstance(client.transport, AsyncHttpTransport) + + options = client.transport._get_pool_options() + assert options["ssl_context"] is not None + + await client.close() From a64446577dd598d66c1bdce5485c666ad13bc425 Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 21 Jul 2025 14:06:50 +0200 Subject: [PATCH 065/102] feat(transport): Add async transport class Add an implementation of Transport to work with the async background worker and HTTPCore async. 
GH-4582 --- sentry_sdk/transport.py | 182 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index f8328cac12..44a7c6eacb 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -6,6 +6,7 @@ import socket import ssl import time +import asyncio from datetime import datetime, timedelta, timezone from collections import defaultdict from urllib.request import getproxies @@ -571,6 +572,187 @@ def flush( self._worker.flush(timeout, callback) +class AsyncHttpTransport(HttpTransportCore): + def __init__(self: Self, options: Dict[str, Any]) -> None: + super().__init__(options) + # Requires event loop at init time + self._loop = asyncio.get_running_loop() + self.background_tasks = set() + + async def _send_envelope(self: Self, envelope: Envelope) -> None: + _prepared_envelope = self._prepare_envelope(envelope) + if _prepared_envelope is None: + return None + envelope, body, headers = _prepared_envelope + await self._send_request( + body.getvalue(), + headers=headers, + endpoint_type=EndpointType.ENVELOPE, + envelope=envelope, + ) + return None + + async def _send_request( + self: Self, + body: bytes, + headers: Dict[str, str], + endpoint_type: EndpointType, + envelope: Optional[Envelope], + ) -> None: + self._update_headers(headers) + try: + response = await self._request( + "POST", + endpoint_type, + body, + headers, + ) + except Exception: + self._handle_request_error(envelope=envelope, loss_reason="network") + raise + try: + self._handle_response(response=response, envelope=envelope) + finally: + response.close() + + async def _request( + self: Self, + method: str, + endpoint_type: EndpointType, + body: Any, + headers: Mapping[str, str], + ) -> httpcore.Response: + return await self._pool.request( + method, + self._auth.get_api_url(endpoint_type), + content=body, + headers=headers, # type: ignore + ) + + def _flush_client_reports(self: Self, force: bool = False) -> None: + 
client_report = self._fetch_pending_client_report(force=force, interval=60) + if client_report is not None: + self.capture_envelope(Envelope(items=[client_report])) + + async def _capture_envelope(self: Self, envelope: Envelope) -> None: + async def send_envelope_wrapper() -> None: + with capture_internal_exceptions(): + await self._send_envelope(envelope) + self._flush_client_reports() + + if not self._worker.submit(send_envelope_wrapper): + self.on_dropped_event("full_queue") + for item in envelope.items: + self.record_lost_event("queue_overflow", item=item) + + def capture_envelope(self: Self, envelope: Envelope) -> None: + # Synchronous entry point + if asyncio.get_running_loop() is not None: + # We are on the main thread running the event loop + task = asyncio.create_task(self._capture_envelope(envelope)) + self.background_tasks.add(task) + task.add_done_callback(self.background_tasks.discard) + else: + # We are in a background thread, not running an event loop, + # have to launch the task on the loop in a threadsafe way. 
+ asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self._loop, + ) + + async def flush_async( + self: Self, + timeout: float, + callback: Optional[Callable[[int, float], None]] = None, + ) -> None: + logger.debug("Flushing HTTP transport") + + if timeout > 0: + self._worker.submit(lambda: self._flush_client_reports(force=True)) + await self._worker.flush_async(timeout, callback) # type: ignore + + def _get_pool_options(self: Self) -> Dict[str, Any]: + options: Dict[str, Any] = { + "http2": False, # no HTTP2 for now + "retries": 3, + } + + socket_options = ( + self.options["socket_options"] + if self.options["socket_options"] is not None + else [] + ) + + used_options = {(o[0], o[1]) for o in socket_options} + for default_option in KEEP_ALIVE_SOCKET_OPTIONS: + if (default_option[0], default_option[1]) not in used_options: + socket_options.append(default_option) + + options["socket_options"] = socket_options + + ssl_context = ssl.create_default_context() + ssl_context.load_verify_locations( + self.options["ca_certs"] # User-provided bundle from the SDK init + or os.environ.get("SSL_CERT_FILE") + or os.environ.get("REQUESTS_CA_BUNDLE") + or certifi.where() + ) + cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE") + key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE") + if cert_file is not None: + ssl_context.load_cert_chain(cert_file, key_file) + + options["ssl_context"] = ssl_context + + return options + + def _make_pool( + self: Self, + ) -> Union[ + httpcore.AsyncSOCKSProxy, httpcore.AsyncHTTPProxy, httpcore.AsyncConnectionPool + ]: + if self.parsed_dsn is None: + raise ValueError("Cannot create HTTP-based transport without valid DSN") + proxy = None + no_proxy = self._in_no_proxy(self.parsed_dsn) + + # try HTTPS first + https_proxy = self.options["https_proxy"] + if self.parsed_dsn.scheme == "https" and (https_proxy != ""): + proxy = https_proxy or (not no_proxy and getproxies().get("https")) + + # 
maybe fallback to HTTP proxy + http_proxy = self.options["http_proxy"] + if not proxy and (http_proxy != ""): + proxy = http_proxy or (not no_proxy and getproxies().get("http")) + + opts = self._get_pool_options() + + if proxy: + proxy_headers = self.options["proxy_headers"] + if proxy_headers: + opts["proxy_headers"] = proxy_headers + + if proxy.startswith("socks"): + try: + if "socket_options" in opts: + socket_options = opts.pop("socket_options") + if socket_options: + logger.warning( + "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options." + ) + return httpcore.AsyncSOCKSProxy(proxy_url=proxy, **opts) + except RuntimeError: + logger.warning( + "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support.", + proxy, + ) + else: + return httpcore.AsyncHTTPProxy(proxy_url=proxy, **opts) + + return httpcore.AsyncConnectionPool(**opts) + + class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: _pool: Union[PoolManager, ProxyManager] From c935e9e21e585a0fe7e1beb64a93163a8ace9990 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 10:31:45 +0200 Subject: [PATCH 066/102] ref(transport): Fix event loop handling in async transport Async Transport now properly checks for the presence of the event loop in capture_envelop, and drops items in case the event loop is no longer running for some reason. 
GH-4582 --- sentry_sdk/transport.py | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 44a7c6eacb..fd160f347a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -29,7 +29,7 @@ from sentry_sdk.consts import EndpointType from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions -from sentry_sdk.worker import BackgroundWorker, Worker +from sentry_sdk.worker import BackgroundWorker, Worker, AsyncWorker from sentry_sdk.envelope import Envelope, Item, PayloadRef from typing import TYPE_CHECKING @@ -225,9 +225,10 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: elif self._compression_algo == "br": self._compression_level = 4 - def _create_worker(self: Self, options: Dict[str, Any]) -> Worker: - # For now, we only support the threaded sync background worker. - return BackgroundWorker(queue_size=options["transport_queue_size"]) + def _create_worker(self, options: dict[str, Any]) -> Worker: + async_enabled = options.get("_experiments", {}).get("transport_async", False) + worker_cls = AsyncWorker if async_enabled else BackgroundWorker + return worker_cls(queue_size=options["transport_queue_size"]) def record_lost_event( self: Self, @@ -647,18 +648,26 @@ async def send_envelope_wrapper() -> None: def capture_envelope(self: Self, envelope: Envelope) -> None: # Synchronous entry point - if asyncio.get_running_loop() is not None: + try: + asyncio.get_running_loop() # We are on the main thread running the event loop task = asyncio.create_task(self._capture_envelope(envelope)) self.background_tasks.add(task) task.add_done_callback(self.background_tasks.discard) - else: + except RuntimeError: # We are in a background thread, not running an event loop, # have to launch the task on the loop in a threadsafe way. 
- asyncio.run_coroutine_threadsafe( - self._capture_envelope(envelope), - self._loop, - ) + if self._loop and self._loop.is_running(): + asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self._loop, + ) + else: + # The event loop is no longer running + logger.warning("Async Transport is not running in an event loop.") + self.on_dropped_event("no_async_context") + for item in envelope.items: + self.record_lost_event("no_async_context", item=item) async def flush_async( self: Self, @@ -998,11 +1007,13 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: ref_transport = options["transport"] use_http2_transport = options.get("_experiments", {}).get("transport_http2", False) - + use_async_transport = options.get("_experiments", {}).get("transport_async", False) # By default, we use the http transport class - transport_cls: Type[Transport] = ( - Http2Transport if use_http2_transport else HttpTransport - ) + if use_async_transport and asyncio.get_running_loop() is not None: + transport_cls: Type[Transport] = AsyncHttpTransport + else: + use_http2 = use_http2_transport + transport_cls = Http2Transport if use_http2 else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From b90daf40bc5549cc0bd982cea6515b3df1a30557 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 11:10:38 +0200 Subject: [PATCH 067/102] feat(transport): Add kill method for async transport Implement a kill method that properly shuts down the async transport. The httpcore async connection pool needs to be explicitly shutdown at the end of its usage. 
GH-4582 --- sentry_sdk/transport.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index fd160f347a..3a2c0cb2df 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -761,6 +761,16 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) + def kill(self: Self) -> None: + + logger.debug("Killing HTTP transport") + self._worker.kill() + for task in self.background_tasks: + task.cancel() + self.background_tasks.clear() + + self._loop.create_task(self._pool.aclose()) # type: ignore + class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: From e1d7cdb8ced87e36a03e16bd251353cad241b7fd Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 15:50:06 +0200 Subject: [PATCH 068/102] ref(transport): Fix type errors in async transport Fix type errors resulting from async override and missing type definition in the async transport. GH-4582 --- sentry_sdk/transport.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 3a2c0cb2df..71d563423d 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -578,7 +578,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time self._loop = asyncio.get_running_loop() - self.background_tasks = set() + self.background_tasks: set[asyncio.Task[None]] = set() async def _send_envelope(self: Self, envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) @@ -616,7 +616,7 @@ async def _send_request( finally: response.close() - async def _request( + async def _request( # type: ignore[override] self: Self, method: str, endpoint_type: EndpointType, From 90346a56dca8368a7809a9df20f7825e12a80453 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 09:44:58 +0200 Subject: [PATCH 069/102] Add silent failing to kill on event loop errors Add a try/catch to ensure silent fail on 
kill in case the event loop shuts down. GH-4582 --- sentry_sdk/transport.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 71d563423d..7743910c79 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -768,8 +768,10 @@ def kill(self: Self) -> None: for task in self.background_tasks: task.cancel() self.background_tasks.clear() - - self._loop.create_task(self._pool.aclose()) # type: ignore + try: + self._loop.create_task(self._pool.aclose()) # type: ignore + except RuntimeError: + logger.warning("Event loop not running, aborting kill.") class HttpTransport(BaseHttpTransport): From 47416f40e44db552b193247e3fe515b537e27e6f Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:27:45 +0200 Subject: [PATCH 070/102] ref(transport): Fix event loop check in make_transport Fix the event loop check in make_transport so that it does not throw a runtime error but rather falls back correctly. GH-4582 --- sentry_sdk/transport.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 7743910c79..541f71ba53 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -1021,11 +1021,14 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: use_http2_transport = options.get("_experiments", {}).get("transport_http2", False) use_async_transport = options.get("_experiments", {}).get("transport_async", False) # By default, we use the http transport class - if use_async_transport and asyncio.get_running_loop() is not None: - transport_cls: Type[Transport] = AsyncHttpTransport - else: - use_http2 = use_http2_transport - transport_cls = Http2Transport if use_http2 else HttpTransport + if use_async_transport: + try: + asyncio.get_running_loop() + transport_cls: Type[Transport] = AsyncHttpTransport + except RuntimeError: + # No event loop running, fall back to sync transport + 
logger.warning("No event loop running, falling back to sync transport.") + transport_cls = Http2Transport if use_http2_transport else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From 73cdc6d8d69452610d0838719f537a3a9e7ec56a Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:45:13 +0200 Subject: [PATCH 071/102] ref(transport): Add missing transport instantiation in non-async context GH-4582 --- sentry_sdk/transport.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 541f71ba53..832a5d5610 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -1029,6 +1029,8 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: # No event loop running, fall back to sync transport logger.warning("No event loop running, falling back to sync transport.") transport_cls = Http2Transport if use_http2_transport else HttpTransport + else: + transport_cls = Http2Transport if use_http2_transport else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From 1ae870811269834c0fa0d783a4e255a968cd4a2e Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 11:21:37 +0200 Subject: [PATCH 072/102] ref(transport): Fix httpcore async specific request handling GH-4582 --- sentry_sdk/transport.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 832a5d5610..aff2132941 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -580,6 +580,16 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: self._loop = asyncio.get_running_loop() self.background_tasks: set[asyncio.Task[None]] = set() + def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: + return next( + ( + val.decode("ascii") + for key, val in response.headers + if key.decode("ascii").lower() == header + ), + None, + ) + async def _send_envelope(self: Self, 
envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) if _prepared_envelope is None: @@ -614,7 +624,7 @@ async def _send_request( try: self._handle_response(response=response, envelope=envelope) finally: - response.close() + await response.aclose() async def _request( # type: ignore[override] self: Self, From 6f186570428f42ee0e4c3ae46aa1b790bf431724 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:49:26 +0200 Subject: [PATCH 073/102] ref(transport): Add gc safety to async kill GH-4582 --- sentry_sdk/transport.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index aff2132941..6644a7c4ec 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -779,7 +779,9 @@ def kill(self: Self) -> None: task.cancel() self.background_tasks.clear() try: - self._loop.create_task(self._pool.aclose()) # type: ignore + task = self._loop.create_task(self._pool.aclose()) # type: ignore + self.background_tasks.add(task) + task.add_done_callback(lambda t: self.background_tasks.discard(t)) except RuntimeError: logger.warning("Event loop not running, aborting kill.") From 87a9b2fdde5e5656da95c9bfb8bda863083daf08 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 13:06:14 +0200 Subject: [PATCH 074/102] ref(transport): Add missing httpcore extensions GH-4582 --- sentry_sdk/transport.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 6644a7c4ec..7793bc020d 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -638,6 +638,14 @@ async def _request( # type: ignore[override] self._auth.get_api_url(endpoint_type), content=body, headers=headers, # type: ignore + extensions={ + "timeout": { + "pool": self.TIMEOUT, + "connect": self.TIMEOUT, + "write": self.TIMEOUT, + "read": self.TIMEOUT, + } + }, ) def _flush_client_reports(self: Self, force: bool = False) -> None: From 
4fd7fa0c67cead71bad8468a2b2008100460aacc Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 10:17:17 +0200 Subject: [PATCH 075/102] fix(transport): Fix fallback sync transport creating async worker GH-4582 --- sentry_sdk/transport.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 7793bc020d..0986613cb9 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -227,7 +227,11 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: def _create_worker(self, options: dict[str, Any]) -> Worker: async_enabled = options.get("_experiments", {}).get("transport_async", False) - worker_cls = AsyncWorker if async_enabled else BackgroundWorker + try: + asyncio.get_running_loop() + worker_cls = AsyncWorker if async_enabled else BackgroundWorker + except RuntimeError: + worker_cls = BackgroundWorker return worker_cls(queue_size=options["transport_queue_size"]) def record_lost_event( From 69734cd469fff833afaa5287ba2c2c747de9a9bb Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 13:12:12 +0200 Subject: [PATCH 076/102] ref(transport): Make kill optionally return a task for async Make kill optionally return a task for async transport. This allows for a blocking kill operation if the caller is in an async context. 
GH-4582 --- sentry_sdk/transport.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 0986613cb9..36a779a210 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -783,7 +783,7 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) - def kill(self: Self) -> None: + def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore logger.debug("Killing HTTP transport") self._worker.kill() @@ -791,11 +791,11 @@ def kill(self: Self) -> None: task.cancel() self.background_tasks.clear() try: - task = self._loop.create_task(self._pool.aclose()) # type: ignore - self.background_tasks.add(task) - task.add_done_callback(lambda t: self.background_tasks.discard(t)) + # Return the pool cleanup task so caller can await it if needed + return self._loop.create_task(self._pool.aclose()) # type: ignore except RuntimeError: logger.warning("Event loop not running, aborting kill.") + return None class HttpTransport(BaseHttpTransport): From f5ef707a4d4eb854f329b418bf8871b71b718c67 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 15:12:20 +0200 Subject: [PATCH 077/102] ref(transport): Adapt transport for synchronous flush interface GH-4582 --- sentry_sdk/transport.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 36a779a210..bf08b1d1e4 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -691,16 +691,17 @@ def capture_envelope(self: Self, envelope: Envelope) -> None: for item in envelope.items: self.record_lost_event("no_async_context", item=item) - async def flush_async( + def flush( # type: ignore[override] self: Self, timeout: float, callback: Optional[Callable[[int, float], None]] = None, - ) -> None: + ) -> Optional[asyncio.Task[None]]: logger.debug("Flushing HTTP transport") if timeout > 0: self._worker.submit(lambda: self._flush_client_reports(force=True)) - await 
self._worker.flush_async(timeout, callback) # type: ignore + return self._worker.flush(timeout, callback) + return None def _get_pool_options(self: Self) -> Dict[str, Any]: options: Dict[str, Any] = { From fca874039894e2e359cf284e2aa41deb0e3f8b02 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 15:17:18 +0200 Subject: [PATCH 078/102] ref(transport): Fix mypy error GH-4582 --- sentry_sdk/transport.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index bf08b1d1e4..277e0f9597 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -700,7 +700,7 @@ def flush( # type: ignore[override] if timeout > 0: self._worker.submit(lambda: self._flush_client_reports(force=True)) - return self._worker.flush(timeout, callback) + return self._worker.flush(timeout, callback) # type: ignore[func-returns-value] return None def _get_pool_options(self: Self) -> Dict[str, Any]: From e23efd7dde39c0ad5e0aedfeeaf66ab30b7c6a74 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 15:54:28 +0200 Subject: [PATCH 079/102] ref(transport): Make client work with sync flush changes GH-4601 --- sentry_sdk/client.py | 6 +++++- sentry_sdk/transport.py | 8 ++++---- tests/test_transport.py | 41 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index dba7651e92..745f168133 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -1023,7 +1023,11 @@ async def _flush_async( self.session_flusher.flush() if self.log_batcher is not None: self.log_batcher.flush() - await self.transport.flush_async(timeout=timeout, callback=callback) # type: ignore + + # For async transport, flush() returns a Task that needs to be awaited + flush_task = self.transport.flush(timeout=timeout, callback=callback) # type: ignore + if flush_task is not None: + await flush_task def __enter__(self) -> _Client: return self diff --git 
a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 277e0f9597..fadb20a909 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -581,7 +581,7 @@ class AsyncHttpTransport(HttpTransportCore): def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time - self._loop = asyncio.get_running_loop() + self.loop = asyncio.get_running_loop() self.background_tasks: set[asyncio.Task[None]] = set() def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: @@ -679,10 +679,10 @@ def capture_envelope(self: Self, envelope: Envelope) -> None: except RuntimeError: # We are in a background thread, not running an event loop, # have to launch the task on the loop in a threadsafe way. - if self._loop and self._loop.is_running(): + if self.loop and self.loop.is_running(): asyncio.run_coroutine_threadsafe( self._capture_envelope(envelope), - self._loop, + self.loop, ) else: # The event loop is no longer running @@ -793,7 +793,7 @@ def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore self.background_tasks.clear() try: # Return the pool cleanup task so caller can await it if needed - return self._loop.create_task(self._pool.aclose()) # type: ignore + return self.loop.create_task(self._pool.aclose()) # type: ignore except RuntimeError: logger.warning("Event loop not running, aborting kill.") return None diff --git a/tests/test_transport.py b/tests/test_transport.py index f5a53e348d..213561005d 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -763,6 +763,47 @@ def close(self): assert seen == ["status_500"] +def test_handle_request_error_basic_coverage(make_client, monkeypatch): + client = make_client() + transport = client.transport + + monkeypatch.setattr(transport._worker, "submit", lambda fn: fn() or True) + + # Track method calls + calls = [] + + def mock_on_dropped_event(reason): + calls.append(("on_dropped_event", reason)) + + def 
mock_record_lost_event(reason, data_category=None, item=None): + calls.append(("record_lost_event", reason, data_category, item)) + + monkeypatch.setattr(transport, "on_dropped_event", mock_on_dropped_event) + monkeypatch.setattr(transport, "record_lost_event", mock_record_lost_event) + + # Test case 1: envelope is None + transport._handle_request_error(envelope=None, loss_reason="test_reason") + + assert len(calls) == 2 + assert calls[0] == ("on_dropped_event", "test_reason") + assert calls[1] == ("record_lost_event", "network_error", "error", None) + + # Reset + calls.clear() + + # Test case 2: envelope with items + envelope = Envelope() + envelope.add_item(mock.MagicMock()) # Simple mock item + envelope.add_item(mock.MagicMock()) # Another mock item + + transport._handle_request_error(envelope=envelope, loss_reason="connection_error") + + assert len(calls) == 3 + assert calls[0] == ("on_dropped_event", "connection_error") + assert calls[1][0:2] == ("record_lost_event", "network_error") + assert calls[2][0:2] == ("record_lost_event", "network_error") + + @pytest.mark.asyncio @pytest.mark.skipif(not PY38, reason="Async transport requires Python 3.8+") async def test_async_transport_background_thread_capture( From 55b606a57e4bbc33d1eb165c9f43c86f97437b0f Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 15:59:41 +0200 Subject: [PATCH 080/102] ref(client): Properly add client changes for sync flush GH-4601 --- sentry_sdk/client.py | 2 -- tox.ini | 43 +++++++++++++------------------------------ 2 files changed, 13 insertions(+), 32 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 745f168133..e209373cc2 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -1023,8 +1023,6 @@ async def _flush_async( self.session_flusher.flush() if self.log_batcher is not None: self.log_batcher.flush() - - # For async transport, flush() returns a Task that needs to be awaited flush_task = self.transport.flush(timeout=timeout, 
callback=callback) # type: ignore if flush_task is not None: await flush_task diff --git a/tox.ini b/tox.ini index 5cf7438f38..d6f5e173eb 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". # -# Last generated: 2025-07-29T08:45:43.951088+00:00 +# Last generated: 2025-07-30T13:59:12.959550+00:00 [tox] requires = @@ -125,9 +125,9 @@ envlist = # ~~~ Common ~~~ {py3.7,py3.8,py3.9}-common-v1.4.1 - {py3.7,py3.8,py3.9,py3.10,py3.11}-common-v1.14.0 - {py3.8,py3.9,py3.10,py3.11}-common-v1.24.0 - {py3.9,py3.10,py3.11,py3.12,py3.13}-common-v1.35.0 + {py3.7,py3.8,py3.9,py3.10,py3.11}-common-v1.15.0 + {py3.8,py3.9,py3.10,py3.11,py3.12}-common-v1.26.0 + {py3.9,py3.10,py3.11,py3.12,py3.13}-common-v1.36.0 # ~~~ AI ~~~ @@ -141,9 +141,9 @@ envlist = {py3.9,py3.11,py3.12}-cohere-v5.13.12 {py3.9,py3.11,py3.12}-cohere-v5.16.1 - {py3.9,py3.11,py3.12}-openai_agents-v0.0.19 - {py3.9,py3.12,py3.13}-openai_agents-v0.1.0 - {py3.9,py3.12,py3.13}-openai_agents-v0.2.3 + {py3.10,py3.11,py3.12}-openai_agents-v0.0.19 + {py3.10,py3.12,py3.13}-openai_agents-v0.1.0 + {py3.10,py3.12,py3.13}-openai_agents-v0.2.4 {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 @@ -165,7 +165,7 @@ envlist = {py3.7,py3.8,py3.9}-sqlalchemy-v1.3.24 {py3.7,py3.11,py3.12}-sqlalchemy-v1.4.54 - {py3.7,py3.12,py3.13}-sqlalchemy-v2.0.41 + {py3.7,py3.12,py3.13}-sqlalchemy-v2.0.42 # ~~~ Flags ~~~ @@ -205,9 +205,6 @@ envlist = {py3.8,py3.11,py3.12}-strawberry-v0.232.2 {py3.8,py3.12,py3.13}-strawberry-v0.255.0 {py3.9,py3.12,py3.13}-strawberry-v0.278.0 - {py3.8,py3.11,py3.12}-strawberry-v0.232.2 - {py3.8,py3.12,py3.13}-strawberry-v0.255.0 - {py3.9,py3.12,py3.13}-strawberry-v0.278.0 # ~~~ Network ~~~ @@ -254,7 +251,6 @@ envlist = {py3.7,py3.10,py3.11}-starlette-v0.26.1 {py3.8,py3.11,py3.12}-starlette-v0.36.3 {py3.9,py3.12,py3.13}-starlette-v0.47.2 - 
{py3.9,py3.12,py3.13}-starlette-v0.47.2 {py3.7,py3.9,py3.10}-fastapi-v0.79.1 {py3.7,py3.10,py3.11}-fastapi-v0.91.0 @@ -275,7 +271,6 @@ envlist = {py3.7,py3.11,py3.12}-falcon-v3.1.3 {py3.8,py3.11,py3.12}-falcon-v4.0.2 {py3.8,py3.11,py3.12}-falcon-v4.1.0a3 - {py3.8,py3.11,py3.12}-falcon-v4.1.0a3 {py3.8,py3.10,py3.11}-litestar-v2.0.1 {py3.8,py3.11,py3.12}-litestar-v2.5.5 @@ -304,8 +299,6 @@ envlist = {py3.8,py3.10,py3.11}-trytond-v6.8.17 {py3.8,py3.11,py3.12}-trytond-v7.0.34 {py3.9,py3.12,py3.13}-trytond-v7.6.4 - {py3.8,py3.11,py3.12}-trytond-v7.0.34 - {py3.9,py3.12,py3.13}-trytond-v7.6.4 {py3.7,py3.12,py3.13}-typer-v0.15.4 {py3.7,py3.12,py3.13}-typer-v0.16.0 @@ -491,9 +484,9 @@ deps = # ~~~ Common ~~~ common-v1.4.1: opentelemetry-sdk==1.4.1 - common-v1.14.0: opentelemetry-sdk==1.14.0 - common-v1.24.0: opentelemetry-sdk==1.24.0 - common-v1.35.0: opentelemetry-sdk==1.35.0 + common-v1.15.0: opentelemetry-sdk==1.15.0 + common-v1.26.0: opentelemetry-sdk==1.26.0 + common-v1.36.0: opentelemetry-sdk==1.36.0 common: pytest common: pytest-asyncio py3.7-common: pytest<7.0.0 @@ -517,8 +510,7 @@ deps = openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 - openai_agents-v0.2.3: openai-agents==0.2.3 - openai_agents-v0.2.3: openai-agents==0.2.3 + openai_agents-v0.2.4: openai-agents==0.2.4 openai_agents: pytest-asyncio huggingface_hub-v0.22.2: huggingface_hub==0.22.2 @@ -542,7 +534,7 @@ deps = sqlalchemy-v1.3.24: sqlalchemy==1.3.24 sqlalchemy-v1.4.54: sqlalchemy==1.4.54 - sqlalchemy-v2.0.41: sqlalchemy==2.0.41 + sqlalchemy-v2.0.42: sqlalchemy==2.0.42 # ~~~ Flags ~~~ @@ -591,15 +583,10 @@ deps = strawberry-v0.232.2: strawberry-graphql[fastapi,flask]==0.232.2 strawberry-v0.255.0: strawberry-graphql[fastapi,flask]==0.255.0 strawberry-v0.278.0: strawberry-graphql[fastapi,flask]==0.278.0 - strawberry-v0.232.2: strawberry-graphql[fastapi,flask]==0.232.2 - strawberry-v0.255.0: strawberry-graphql[fastapi,flask]==0.255.0 - strawberry-v0.278.0: 
strawberry-graphql[fastapi,flask]==0.278.0 strawberry: httpx strawberry-v0.209.8: pydantic<2.11 strawberry-v0.232.2: pydantic<2.11 strawberry-v0.255.0: pydantic<2.11 - strawberry-v0.232.2: pydantic<2.11 - strawberry-v0.255.0: pydantic<2.11 # ~~~ Network ~~~ @@ -680,7 +667,6 @@ deps = starlette-v0.26.1: starlette==0.26.1 starlette-v0.36.3: starlette==0.36.3 starlette-v0.47.2: starlette==0.47.2 - starlette-v0.47.2: starlette==0.47.2 starlette: pytest-asyncio starlette: python-multipart starlette: requests @@ -725,7 +711,6 @@ deps = falcon-v3.1.3: falcon==3.1.3 falcon-v4.0.2: falcon==4.0.2 falcon-v4.1.0a3: falcon==4.1.0a3 - falcon-v4.1.0a3: falcon==4.1.0a3 litestar-v2.0.1: litestar==2.0.1 litestar-v2.5.5: litestar==2.5.5 @@ -771,8 +756,6 @@ deps = trytond-v6.8.17: trytond==6.8.17 trytond-v7.0.34: trytond==7.0.34 trytond-v7.6.4: trytond==7.6.4 - trytond-v7.0.34: trytond==7.0.34 - trytond-v7.6.4: trytond==7.6.4 trytond: werkzeug trytond-v5.0.63: werkzeug<1.0 From d89abed4e59514ad3d22b15fcea9e28f27d7b97b Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 21 Jul 2025 14:06:50 +0200 Subject: [PATCH 081/102] feat(transport): Add async transport class Add an implementation of Transport to work with the async background worker and HTTPCore async. 
GH-4582 --- sentry_sdk/transport.py | 182 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index f8328cac12..44a7c6eacb 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -6,6 +6,7 @@ import socket import ssl import time +import asyncio from datetime import datetime, timedelta, timezone from collections import defaultdict from urllib.request import getproxies @@ -571,6 +572,187 @@ def flush( self._worker.flush(timeout, callback) +class AsyncHttpTransport(HttpTransportCore): + def __init__(self: Self, options: Dict[str, Any]) -> None: + super().__init__(options) + # Requires event loop at init time + self._loop = asyncio.get_running_loop() + self.background_tasks = set() + + async def _send_envelope(self: Self, envelope: Envelope) -> None: + _prepared_envelope = self._prepare_envelope(envelope) + if _prepared_envelope is None: + return None + envelope, body, headers = _prepared_envelope + await self._send_request( + body.getvalue(), + headers=headers, + endpoint_type=EndpointType.ENVELOPE, + envelope=envelope, + ) + return None + + async def _send_request( + self: Self, + body: bytes, + headers: Dict[str, str], + endpoint_type: EndpointType, + envelope: Optional[Envelope], + ) -> None: + self._update_headers(headers) + try: + response = await self._request( + "POST", + endpoint_type, + body, + headers, + ) + except Exception: + self._handle_request_error(envelope=envelope, loss_reason="network") + raise + try: + self._handle_response(response=response, envelope=envelope) + finally: + response.close() + + async def _request( + self: Self, + method: str, + endpoint_type: EndpointType, + body: Any, + headers: Mapping[str, str], + ) -> httpcore.Response: + return await self._pool.request( + method, + self._auth.get_api_url(endpoint_type), + content=body, + headers=headers, # type: ignore + ) + + def _flush_client_reports(self: Self, force: bool = False) -> None: + 
client_report = self._fetch_pending_client_report(force=force, interval=60) + if client_report is not None: + self.capture_envelope(Envelope(items=[client_report])) + + async def _capture_envelope(self: Self, envelope: Envelope) -> None: + async def send_envelope_wrapper() -> None: + with capture_internal_exceptions(): + await self._send_envelope(envelope) + self._flush_client_reports() + + if not self._worker.submit(send_envelope_wrapper): + self.on_dropped_event("full_queue") + for item in envelope.items: + self.record_lost_event("queue_overflow", item=item) + + def capture_envelope(self: Self, envelope: Envelope) -> None: + # Synchronous entry point + if asyncio.get_running_loop() is not None: + # We are on the main thread running the event loop + task = asyncio.create_task(self._capture_envelope(envelope)) + self.background_tasks.add(task) + task.add_done_callback(self.background_tasks.discard) + else: + # We are in a background thread, not running an event loop, + # have to launch the task on the loop in a threadsafe way. 
+ asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self._loop, + ) + + async def flush_async( + self: Self, + timeout: float, + callback: Optional[Callable[[int, float], None]] = None, + ) -> None: + logger.debug("Flushing HTTP transport") + + if timeout > 0: + self._worker.submit(lambda: self._flush_client_reports(force=True)) + await self._worker.flush_async(timeout, callback) # type: ignore + + def _get_pool_options(self: Self) -> Dict[str, Any]: + options: Dict[str, Any] = { + "http2": False, # no HTTP2 for now + "retries": 3, + } + + socket_options = ( + self.options["socket_options"] + if self.options["socket_options"] is not None + else [] + ) + + used_options = {(o[0], o[1]) for o in socket_options} + for default_option in KEEP_ALIVE_SOCKET_OPTIONS: + if (default_option[0], default_option[1]) not in used_options: + socket_options.append(default_option) + + options["socket_options"] = socket_options + + ssl_context = ssl.create_default_context() + ssl_context.load_verify_locations( + self.options["ca_certs"] # User-provided bundle from the SDK init + or os.environ.get("SSL_CERT_FILE") + or os.environ.get("REQUESTS_CA_BUNDLE") + or certifi.where() + ) + cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE") + key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE") + if cert_file is not None: + ssl_context.load_cert_chain(cert_file, key_file) + + options["ssl_context"] = ssl_context + + return options + + def _make_pool( + self: Self, + ) -> Union[ + httpcore.AsyncSOCKSProxy, httpcore.AsyncHTTPProxy, httpcore.AsyncConnectionPool + ]: + if self.parsed_dsn is None: + raise ValueError("Cannot create HTTP-based transport without valid DSN") + proxy = None + no_proxy = self._in_no_proxy(self.parsed_dsn) + + # try HTTPS first + https_proxy = self.options["https_proxy"] + if self.parsed_dsn.scheme == "https" and (https_proxy != ""): + proxy = https_proxy or (not no_proxy and getproxies().get("https")) + + # 
maybe fallback to HTTP proxy + http_proxy = self.options["http_proxy"] + if not proxy and (http_proxy != ""): + proxy = http_proxy or (not no_proxy and getproxies().get("http")) + + opts = self._get_pool_options() + + if proxy: + proxy_headers = self.options["proxy_headers"] + if proxy_headers: + opts["proxy_headers"] = proxy_headers + + if proxy.startswith("socks"): + try: + if "socket_options" in opts: + socket_options = opts.pop("socket_options") + if socket_options: + logger.warning( + "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options." + ) + return httpcore.AsyncSOCKSProxy(proxy_url=proxy, **opts) + except RuntimeError: + logger.warning( + "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support.", + proxy, + ) + else: + return httpcore.AsyncHTTPProxy(proxy_url=proxy, **opts) + + return httpcore.AsyncConnectionPool(**opts) + + class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: _pool: Union[PoolManager, ProxyManager] From 5e1e0c6b5f842693a1559054cecafe5aab7174a2 Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 10:31:45 +0200 Subject: [PATCH 082/102] ref(transport): Fix event loop handling in async transport Async Transport now properly checks for the presence of the event loop in capture_envelope, and drops items in case the event loop is no longer running for some reason.
GH-4582 --- sentry_sdk/transport.py | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 44a7c6eacb..fd160f347a 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -29,7 +29,7 @@ from sentry_sdk.consts import EndpointType from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions -from sentry_sdk.worker import BackgroundWorker, Worker +from sentry_sdk.worker import BackgroundWorker, Worker, AsyncWorker from sentry_sdk.envelope import Envelope, Item, PayloadRef from typing import TYPE_CHECKING @@ -225,9 +225,10 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: elif self._compression_algo == "br": self._compression_level = 4 - def _create_worker(self: Self, options: Dict[str, Any]) -> Worker: - # For now, we only support the threaded sync background worker. - return BackgroundWorker(queue_size=options["transport_queue_size"]) + def _create_worker(self, options: dict[str, Any]) -> Worker: + async_enabled = options.get("_experiments", {}).get("transport_async", False) + worker_cls = AsyncWorker if async_enabled else BackgroundWorker + return worker_cls(queue_size=options["transport_queue_size"]) def record_lost_event( self: Self, @@ -647,18 +648,26 @@ async def send_envelope_wrapper() -> None: def capture_envelope(self: Self, envelope: Envelope) -> None: # Synchronous entry point - if asyncio.get_running_loop() is not None: + try: + asyncio.get_running_loop() # We are on the main thread running the event loop task = asyncio.create_task(self._capture_envelope(envelope)) self.background_tasks.add(task) task.add_done_callback(self.background_tasks.discard) - else: + except RuntimeError: # We are in a background thread, not running an event loop, # have to launch the task on the loop in a threadsafe way. 
- asyncio.run_coroutine_threadsafe( - self._capture_envelope(envelope), - self._loop, - ) + if self._loop and self._loop.is_running(): + asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self._loop, + ) + else: + # The event loop is no longer running + logger.warning("Async Transport is not running in an event loop.") + self.on_dropped_event("no_async_context") + for item in envelope.items: + self.record_lost_event("no_async_context", item=item) async def flush_async( self: Self, @@ -998,11 +1007,13 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: ref_transport = options["transport"] use_http2_transport = options.get("_experiments", {}).get("transport_http2", False) - + use_async_transport = options.get("_experiments", {}).get("transport_async", False) # By default, we use the http transport class - transport_cls: Type[Transport] = ( - Http2Transport if use_http2_transport else HttpTransport - ) + if use_async_transport and asyncio.get_running_loop() is not None: + transport_cls: Type[Transport] = AsyncHttpTransport + else: + use_http2 = use_http2_transport + transport_cls = Http2Transport if use_http2 else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From 8fdf43d5af684b7879de21c6d384420792c7333f Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 22 Jul 2025 11:10:38 +0200 Subject: [PATCH 083/102] feat(transport): Add kill method for async transport Implement a kill method that properly shuts down the async transport. The httpcore async connection pool needs to be explicitly shutdown at the end of its usage. 
GH-4582 --- sentry_sdk/transport.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index fd160f347a..3a2c0cb2df 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -761,6 +761,16 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) + def kill(self: Self) -> None: + + logger.debug("Killing HTTP transport") + self._worker.kill() + for task in self.background_tasks: + task.cancel() + self.background_tasks.clear() + + self._loop.create_task(self._pool.aclose()) # type: ignore + class HttpTransport(BaseHttpTransport): if TYPE_CHECKING: From 6619670153c59ff8734763bf361264b89b441363 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 23 Jul 2025 15:50:06 +0200 Subject: [PATCH 084/102] ref(transport): Fix type errors in async transport Fix type errors resulting from async override and missing type definition in the async transport. GH-4582 --- sentry_sdk/transport.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 3a2c0cb2df..71d563423d 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -578,7 +578,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time self._loop = asyncio.get_running_loop() - self.background_tasks = set() + self.background_tasks: set[asyncio.Task[None]] = set() async def _send_envelope(self: Self, envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) @@ -616,7 +616,7 @@ async def _send_request( finally: response.close() - async def _request( + async def _request( # type: ignore[override] self: Self, method: str, endpoint_type: EndpointType, From 2eee1b1a6941c541213775b05b0472ba67456c46 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 09:44:58 +0200 Subject: [PATCH 085/102] Add silent failing to kill on event loop errors Add a try/catch to ensure silent fail on 
kill in case the event loop shuts down. GH-4582 --- sentry_sdk/transport.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 71d563423d..7743910c79 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -768,8 +768,10 @@ def kill(self: Self) -> None: for task in self.background_tasks: task.cancel() self.background_tasks.clear() - - self._loop.create_task(self._pool.aclose()) # type: ignore + try: + self._loop.create_task(self._pool.aclose()) # type: ignore + except RuntimeError: + logger.warning("Event loop not running, aborting kill.") class HttpTransport(BaseHttpTransport): From 6c787a446dd3dc0bc5a35e3e5d4270db455a9d5a Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:27:45 +0200 Subject: [PATCH 086/102] ref(transport): Fix event loop check in make_transport Fix the event loop check in make_transport so that it does not throw a runtime error but rather falls back correctly. GH-4582 --- sentry_sdk/transport.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 7743910c79..541f71ba53 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -1021,11 +1021,14 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: use_http2_transport = options.get("_experiments", {}).get("transport_http2", False) use_async_transport = options.get("_experiments", {}).get("transport_async", False) # By default, we use the http transport class - if use_async_transport and asyncio.get_running_loop() is not None: - transport_cls: Type[Transport] = AsyncHttpTransport - else: - use_http2 = use_http2_transport - transport_cls = Http2Transport if use_http2 else HttpTransport + if use_async_transport: + try: + asyncio.get_running_loop() + transport_cls: Type[Transport] = AsyncHttpTransport + except RuntimeError: + # No event loop running, fall back to sync transport + 
logger.warning("No event loop running, falling back to sync transport.") + transport_cls = Http2Transport if use_http2_transport else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From b79d3465b363ea621152ebb017bc02a7e43ac2b1 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 24 Jul 2025 11:45:13 +0200 Subject: [PATCH 087/102] ref(transport): Add missing transport instantiation in non-async context GH-4582 --- sentry_sdk/transport.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 541f71ba53..832a5d5610 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -1029,6 +1029,8 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]: # No event loop running, fall back to sync transport logger.warning("No event loop running, falling back to sync transport.") transport_cls = Http2Transport if use_http2_transport else HttpTransport + else: + transport_cls = Http2Transport if use_http2_transport else HttpTransport if isinstance(ref_transport, Transport): return ref_transport From 1717888e3af4bfc24b445bd7d8401d31a97924ee Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 11:21:37 +0200 Subject: [PATCH 088/102] ref(transport): Fix httpcore async specific request handling GH-4582 --- sentry_sdk/transport.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 832a5d5610..aff2132941 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -580,6 +580,16 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: self._loop = asyncio.get_running_loop() self.background_tasks: set[asyncio.Task[None]] = set() + def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: + return next( + ( + val.decode("ascii") + for key, val in response.headers + if key.decode("ascii").lower() == header + ), + None, + ) + async def _send_envelope(self: Self, 
envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) if _prepared_envelope is None: @@ -614,7 +624,7 @@ async def _send_request( try: self._handle_response(response=response, envelope=envelope) finally: - response.close() + await response.aclose() async def _request( # type: ignore[override] self: Self, From e1fd57aa1c19e05f4614814773a2e39ad5956b70 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 12:49:26 +0200 Subject: [PATCH 089/102] ref(transport): Add gc safety to async kill GH-4582 --- sentry_sdk/transport.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index aff2132941..6644a7c4ec 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -779,7 +779,9 @@ def kill(self: Self) -> None: task.cancel() self.background_tasks.clear() try: - self._loop.create_task(self._pool.aclose()) # type: ignore + task = self._loop.create_task(self._pool.aclose()) # type: ignore + self.background_tasks.add(task) + task.add_done_callback(lambda t: self.background_tasks.discard(t)) except RuntimeError: logger.warning("Event loop not running, aborting kill.") From b87c68ef00a203198846d315da3f72044a3b1fb3 Mon Sep 17 00:00:00 2001 From: srothh Date: Fri, 25 Jul 2025 13:06:14 +0200 Subject: [PATCH 090/102] ref(transport): Add missing httpcore extensions GH-4582 --- sentry_sdk/transport.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 6644a7c4ec..7793bc020d 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -638,6 +638,14 @@ async def _request( # type: ignore[override] self._auth.get_api_url(endpoint_type), content=body, headers=headers, # type: ignore + extensions={ + "timeout": { + "pool": self.TIMEOUT, + "connect": self.TIMEOUT, + "write": self.TIMEOUT, + "read": self.TIMEOUT, + } + }, ) def _flush_client_reports(self: Self, force: bool = False) -> None: From 
a827d0dadf4dadcd679ccd22d88990c8f29462fa Mon Sep 17 00:00:00 2001 From: srothh Date: Mon, 28 Jul 2025 10:17:17 +0200 Subject: [PATCH 091/102] fix(transport): Fix fallback sync transport creating async worker GH-4582 --- sentry_sdk/transport.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 7793bc020d..0986613cb9 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -227,7 +227,11 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: def _create_worker(self, options: dict[str, Any]) -> Worker: async_enabled = options.get("_experiments", {}).get("transport_async", False) - worker_cls = AsyncWorker if async_enabled else BackgroundWorker + try: + asyncio.get_running_loop() + worker_cls = AsyncWorker if async_enabled else BackgroundWorker + except RuntimeError: + worker_cls = BackgroundWorker return worker_cls(queue_size=options["transport_queue_size"]) def record_lost_event( From ee0b440a4a3eb22d2288a4983c92686d89d42efd Mon Sep 17 00:00:00 2001 From: srothh Date: Tue, 29 Jul 2025 13:12:12 +0200 Subject: [PATCH 092/102] ref(transport): Make kill optionally return a task for async Make kill optionally return a task for async transport. This allows for a blocking kill operation if the caller is in an async context. 
GH-4582 --- sentry_sdk/transport.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 0986613cb9..36a779a210 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -783,7 +783,7 @@ def _make_pool( return httpcore.AsyncConnectionPool(**opts) - def kill(self: Self) -> None: + def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore logger.debug("Killing HTTP transport") self._worker.kill() @@ -791,11 +791,11 @@ def kill(self: Self) -> None: task.cancel() self.background_tasks.clear() try: - task = self._loop.create_task(self._pool.aclose()) # type: ignore - self.background_tasks.add(task) - task.add_done_callback(lambda t: self.background_tasks.discard(t)) + # Return the pool cleanup task so caller can await it if needed + return self._loop.create_task(self._pool.aclose()) # type: ignore except RuntimeError: logger.warning("Event loop not running, aborting kill.") + return None class HttpTransport(BaseHttpTransport): From 70f228ef2a14b869d8adad6bfbdb656e9e8b321d Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 15:12:20 +0200 Subject: [PATCH 093/102] ref(transport): Adapt transport for synchronous flush interface GH-4582 --- sentry_sdk/transport.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 36a779a210..bf08b1d1e4 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -691,16 +691,17 @@ def capture_envelope(self: Self, envelope: Envelope) -> None: for item in envelope.items: self.record_lost_event("no_async_context", item=item) - async def flush_async( + def flush( # type: ignore[override] self: Self, timeout: float, callback: Optional[Callable[[int, float], None]] = None, - ) -> None: + ) -> Optional[asyncio.Task[None]]: logger.debug("Flushing HTTP transport") if timeout > 0: self._worker.submit(lambda: self._flush_client_reports(force=True)) - await 
self._worker.flush_async(timeout, callback) # type: ignore + return self._worker.flush(timeout, callback) + return None def _get_pool_options(self: Self) -> Dict[str, Any]: options: Dict[str, Any] = { From 328d8ad60053c03ed82b34588a869f0460715fd7 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 15:17:18 +0200 Subject: [PATCH 094/102] ref(transport): Fix mypy error GH-4582 --- sentry_sdk/transport.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index bf08b1d1e4..277e0f9597 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -700,7 +700,7 @@ def flush( # type: ignore[override] if timeout > 0: self._worker.submit(lambda: self._flush_client_reports(force=True)) - return self._worker.flush(timeout, callback) + return self._worker.flush(timeout, callback) # type: ignore[func-returns-value] return None def _get_pool_options(self: Self) -> Dict[str, Any]: From ef6113442086de5cf100337de81963b20769e234 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 16:21:51 +0200 Subject: [PATCH 095/102] ref(transport): Make transport loop public GH-4582 --- sentry_sdk/transport.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 277e0f9597..fadb20a909 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -581,7 +581,7 @@ class AsyncHttpTransport(HttpTransportCore): def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time - self._loop = asyncio.get_running_loop() + self.loop = asyncio.get_running_loop() self.background_tasks: set[asyncio.Task[None]] = set() def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: @@ -679,10 +679,10 @@ def capture_envelope(self: Self, envelope: Envelope) -> None: except RuntimeError: # We are in a background thread, not running an event loop, # have to launch the 
task on the loop in a threadsafe way. - if self._loop and self._loop.is_running(): + if self.loop and self.loop.is_running(): asyncio.run_coroutine_threadsafe( self._capture_envelope(envelope), - self._loop, + self.loop, ) else: # The event loop is no longer running @@ -793,7 +793,7 @@ def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore self.background_tasks.clear() try: # Return the pool cleanup task so caller can await it if needed - return self._loop.create_task(self._pool.aclose()) # type: ignore + return self.loop.create_task(self._pool.aclose()) # type: ignore except RuntimeError: logger.warning("Event loop not running, aborting kill.") return None From 42d3a348b99f998af97fa5ddabafcca5eb95a031 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 16:52:24 +0200 Subject: [PATCH 096/102] ref(transport): Add import checking for async transport GH-4582 --- sentry_sdk/transport.py | 414 +++++++++++++++++++++------------------- 1 file changed, 218 insertions(+), 196 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index fadb20a909..6158868a16 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -23,6 +23,15 @@ HTTP2_ENABLED = True except ImportError: HTTP2_ENABLED = False + httpcore = None + +try: + import httpcore # noqa: F401 + import anyio # noqa: F401 + + ASYNC_TRANSPORT_ENABLED = True +except ImportError: + ASYNC_TRANSPORT_ENABLED = False import urllib3 import certifi @@ -577,226 +586,239 @@ def flush( self._worker.flush(timeout, callback) -class AsyncHttpTransport(HttpTransportCore): - def __init__(self: Self, options: Dict[str, Any]) -> None: - super().__init__(options) - # Requires event loop at init time - self.loop = asyncio.get_running_loop() - self.background_tasks: set[asyncio.Task[None]] = set() - - def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: - return next( - ( - val.decode("ascii") - for key, val in response.headers - if key.decode("ascii").lower() 
== header - ), - None, - ) - - async def _send_envelope(self: Self, envelope: Envelope) -> None: - _prepared_envelope = self._prepare_envelope(envelope) - if _prepared_envelope is None: - return None - envelope, body, headers = _prepared_envelope - await self._send_request( - body.getvalue(), - headers=headers, - endpoint_type=EndpointType.ENVELOPE, - envelope=envelope, - ) - return None - - async def _send_request( - self: Self, - body: bytes, - headers: Dict[str, str], - endpoint_type: EndpointType, - envelope: Optional[Envelope], - ) -> None: - self._update_headers(headers) - try: - response = await self._request( - "POST", - endpoint_type, - body, - headers, +if not ASYNC_TRANSPORT_ENABLED: + # Sorry, no AsyncHttpTransport for you + class AsyncHttpTransport(BaseHttpTransport): + def __init__(self: Self, options: Dict[str, Any]) -> None: + super().__init__(options) + logger.warning( + "You tried to use AsyncHttpTransport but don't have httpcore[asyncio] installed. Falling back to sync transport." 
) - except Exception: - self._handle_request_error(envelope=envelope, loss_reason="network") - raise - try: - self._handle_response(response=response, envelope=envelope) - finally: - await response.aclose() - async def _request( # type: ignore[override] - self: Self, - method: str, - endpoint_type: EndpointType, - body: Any, - headers: Mapping[str, str], - ) -> httpcore.Response: - return await self._pool.request( - method, - self._auth.get_api_url(endpoint_type), - content=body, - headers=headers, # type: ignore - extensions={ - "timeout": { - "pool": self.TIMEOUT, - "connect": self.TIMEOUT, - "write": self.TIMEOUT, - "read": self.TIMEOUT, - } - }, - ) +else: - def _flush_client_reports(self: Self, force: bool = False) -> None: - client_report = self._fetch_pending_client_report(force=force, interval=60) - if client_report is not None: - self.capture_envelope(Envelope(items=[client_report])) + class AsyncHttpTransport(HttpTransportCore): + def __init__(self: Self, options: Dict[str, Any]) -> None: + super().__init__(options) + # Requires event loop at init time + self.loop = asyncio.get_running_loop() + self.background_tasks: set[asyncio.Task[None]] = set() - async def _capture_envelope(self: Self, envelope: Envelope) -> None: - async def send_envelope_wrapper() -> None: - with capture_internal_exceptions(): - await self._send_envelope(envelope) - self._flush_client_reports() + def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: + return next( + ( + val.decode("ascii") + for key, val in response.headers + if key.decode("ascii").lower() == header + ), + None, + ) - if not self._worker.submit(send_envelope_wrapper): - self.on_dropped_event("full_queue") - for item in envelope.items: - self.record_lost_event("queue_overflow", item=item) + async def _send_envelope(self: Self, envelope: Envelope) -> None: + _prepared_envelope = self._prepare_envelope(envelope) + if _prepared_envelope is None: + return None + envelope, body, headers = 
_prepared_envelope + await self._send_request( + body.getvalue(), + headers=headers, + endpoint_type=EndpointType.ENVELOPE, + envelope=envelope, + ) + return None - def capture_envelope(self: Self, envelope: Envelope) -> None: - # Synchronous entry point - try: - asyncio.get_running_loop() - # We are on the main thread running the event loop - task = asyncio.create_task(self._capture_envelope(envelope)) - self.background_tasks.add(task) - task.add_done_callback(self.background_tasks.discard) - except RuntimeError: - # We are in a background thread, not running an event loop, - # have to launch the task on the loop in a threadsafe way. - if self.loop and self.loop.is_running(): - asyncio.run_coroutine_threadsafe( - self._capture_envelope(envelope), - self.loop, + async def _send_request( + self: Self, + body: bytes, + headers: Dict[str, str], + endpoint_type: EndpointType, + envelope: Optional[Envelope], + ) -> None: + self._update_headers(headers) + try: + response = await self._request( + "POST", + endpoint_type, + body, + headers, ) - else: - # The event loop is no longer running - logger.warning("Async Transport is not running in an event loop.") - self.on_dropped_event("no_async_context") - for item in envelope.items: - self.record_lost_event("no_async_context", item=item) + except Exception: + self._handle_request_error(envelope=envelope, loss_reason="network") + raise + try: + self._handle_response(response=response, envelope=envelope) + finally: + await response.aclose() + + async def _request( # type: ignore[override] + self: Self, + method: str, + endpoint_type: EndpointType, + body: Any, + headers: Mapping[str, str], + ) -> httpcore.Response: + return await self._pool.request( + method, + self._auth.get_api_url(endpoint_type), + content=body, + headers=headers, # type: ignore + extensions={ + "timeout": { + "pool": self.TIMEOUT, + "connect": self.TIMEOUT, + "write": self.TIMEOUT, + "read": self.TIMEOUT, + } + }, + ) - def flush( # type: ignore[override] - 
self: Self, - timeout: float, - callback: Optional[Callable[[int, float], None]] = None, - ) -> Optional[asyncio.Task[None]]: - logger.debug("Flushing HTTP transport") + def _flush_client_reports(self: Self, force: bool = False) -> None: + client_report = self._fetch_pending_client_report(force=force, interval=60) + if client_report is not None: + self.capture_envelope(Envelope(items=[client_report])) - if timeout > 0: - self._worker.submit(lambda: self._flush_client_reports(force=True)) - return self._worker.flush(timeout, callback) # type: ignore[func-returns-value] - return None - - def _get_pool_options(self: Self) -> Dict[str, Any]: - options: Dict[str, Any] = { - "http2": False, # no HTTP2 for now - "retries": 3, - } + async def _capture_envelope(self: Self, envelope: Envelope) -> None: + async def send_envelope_wrapper() -> None: + with capture_internal_exceptions(): + await self._send_envelope(envelope) + self._flush_client_reports() - socket_options = ( - self.options["socket_options"] - if self.options["socket_options"] is not None - else [] - ) + if not self._worker.submit(send_envelope_wrapper): + self.on_dropped_event("full_queue") + for item in envelope.items: + self.record_lost_event("queue_overflow", item=item) + + def capture_envelope(self: Self, envelope: Envelope) -> None: + # Synchronous entry point + try: + asyncio.get_running_loop() + # We are on the main thread running the event loop + task = asyncio.create_task(self._capture_envelope(envelope)) + self.background_tasks.add(task) + task.add_done_callback(self.background_tasks.discard) + except RuntimeError: + # We are in a background thread, not running an event loop, + # have to launch the task on the loop in a threadsafe way. 
+ if self.loop and self.loop.is_running(): + asyncio.run_coroutine_threadsafe( + self._capture_envelope(envelope), + self.loop, + ) + else: + # The event loop is no longer running + logger.warning("Async Transport is not running in an event loop.") + self.on_dropped_event("no_async_context") + for item in envelope.items: + self.record_lost_event("no_async_context", item=item) - used_options = {(o[0], o[1]) for o in socket_options} - for default_option in KEEP_ALIVE_SOCKET_OPTIONS: - if (default_option[0], default_option[1]) not in used_options: - socket_options.append(default_option) + def flush( # type: ignore[override] + self: Self, + timeout: float, + callback: Optional[Callable[[int, float], None]] = None, + ) -> Optional[asyncio.Task[None]]: + logger.debug("Flushing HTTP transport") + + if timeout > 0: + self._worker.submit(lambda: self._flush_client_reports(force=True)) + return self._worker.flush(timeout, callback) # type: ignore[func-returns-value] + return None - options["socket_options"] = socket_options + def _get_pool_options(self: Self) -> Dict[str, Any]: + options: Dict[str, Any] = { + "http2": False, # no HTTP2 for now + "retries": 3, + } - ssl_context = ssl.create_default_context() - ssl_context.load_verify_locations( - self.options["ca_certs"] # User-provided bundle from the SDK init - or os.environ.get("SSL_CERT_FILE") - or os.environ.get("REQUESTS_CA_BUNDLE") - or certifi.where() - ) - cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE") - key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE") - if cert_file is not None: - ssl_context.load_cert_chain(cert_file, key_file) + socket_options = ( + self.options["socket_options"] + if self.options["socket_options"] is not None + else [] + ) - options["ssl_context"] = ssl_context + used_options = {(o[0], o[1]) for o in socket_options} + for default_option in KEEP_ALIVE_SOCKET_OPTIONS: + if (default_option[0], default_option[1]) not in used_options: + 
socket_options.append(default_option) - return options + options["socket_options"] = socket_options - def _make_pool( - self: Self, - ) -> Union[ - httpcore.AsyncSOCKSProxy, httpcore.AsyncHTTPProxy, httpcore.AsyncConnectionPool - ]: - if self.parsed_dsn is None: - raise ValueError("Cannot create HTTP-based transport without valid DSN") - proxy = None - no_proxy = self._in_no_proxy(self.parsed_dsn) + ssl_context = ssl.create_default_context() + ssl_context.load_verify_locations( + self.options["ca_certs"] # User-provided bundle from the SDK init + or os.environ.get("SSL_CERT_FILE") + or os.environ.get("REQUESTS_CA_BUNDLE") + or certifi.where() + ) + cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE") + key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE") + if cert_file is not None: + ssl_context.load_cert_chain(cert_file, key_file) - # try HTTPS first - https_proxy = self.options["https_proxy"] - if self.parsed_dsn.scheme == "https" and (https_proxy != ""): - proxy = https_proxy or (not no_proxy and getproxies().get("https")) + options["ssl_context"] = ssl_context - # maybe fallback to HTTP proxy - http_proxy = self.options["http_proxy"] - if not proxy and (http_proxy != ""): - proxy = http_proxy or (not no_proxy and getproxies().get("http")) + return options - opts = self._get_pool_options() + def _make_pool( + self: Self, + ) -> Union[ + httpcore.AsyncSOCKSProxy, + httpcore.AsyncHTTPProxy, + httpcore.AsyncConnectionPool, + ]: + if self.parsed_dsn is None: + raise ValueError("Cannot create HTTP-based transport without valid DSN") + proxy = None + no_proxy = self._in_no_proxy(self.parsed_dsn) - if proxy: - proxy_headers = self.options["proxy_headers"] - if proxy_headers: - opts["proxy_headers"] = proxy_headers + # try HTTPS first + https_proxy = self.options["https_proxy"] + if self.parsed_dsn.scheme == "https" and (https_proxy != ""): + proxy = https_proxy or (not no_proxy and getproxies().get("https")) - if 
proxy.startswith("socks"): - try: - if "socket_options" in opts: - socket_options = opts.pop("socket_options") - if socket_options: - logger.warning( - "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options." - ) - return httpcore.AsyncSOCKSProxy(proxy_url=proxy, **opts) - except RuntimeError: - logger.warning( - "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support.", - proxy, - ) - else: - return httpcore.AsyncHTTPProxy(proxy_url=proxy, **opts) + # maybe fallback to HTTP proxy + http_proxy = self.options["http_proxy"] + if not proxy and (http_proxy != ""): + proxy = http_proxy or (not no_proxy and getproxies().get("http")) - return httpcore.AsyncConnectionPool(**opts) + opts = self._get_pool_options() - def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore + if proxy: + proxy_headers = self.options["proxy_headers"] + if proxy_headers: + opts["proxy_headers"] = proxy_headers - logger.debug("Killing HTTP transport") - self._worker.kill() - for task in self.background_tasks: - task.cancel() - self.background_tasks.clear() - try: - # Return the pool cleanup task so caller can await it if needed - return self.loop.create_task(self._pool.aclose()) # type: ignore - except RuntimeError: - logger.warning("Event loop not running, aborting kill.") - return None + if proxy.startswith("socks"): + try: + if "socket_options" in opts: + socket_options = opts.pop("socket_options") + if socket_options: + logger.warning( + "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options." + ) + return httpcore.AsyncSOCKSProxy(proxy_url=proxy, **opts) + except RuntimeError: + logger.warning( + "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. 
Disabling proxy support.", + proxy, + ) + else: + return httpcore.AsyncHTTPProxy(proxy_url=proxy, **opts) + + return httpcore.AsyncConnectionPool(**opts) + + def kill(self: Self) -> Optional[asyncio.Task[None]]: # type: ignore + + logger.debug("Killing HTTP transport") + self._worker.kill() + for task in self.background_tasks: + task.cancel() + self.background_tasks.clear() + try: + # Return the pool cleanup task so caller can await it if needed + return self.loop.create_task(self._pool.aclose()) # type: ignore + except RuntimeError: + logger.warning("Event loop not running, aborting kill.") + return None class HttpTransport(BaseHttpTransport): From 10d85f657fa8a6f028129d5164d2dfaa976dbede Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 16:58:26 +0200 Subject: [PATCH 097/102] ref(transport): Fix typing errors GH-4582 --- sentry_sdk/transport.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 6158868a16..3049e88e8f 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -23,7 +23,7 @@ HTTP2_ENABLED = True except ImportError: HTTP2_ENABLED = False - httpcore = None + httpcore = None # type: ignore try: import httpcore # noqa: F401 @@ -597,7 +597,7 @@ def __init__(self: Self, options: Dict[str, Any]) -> None: else: - class AsyncHttpTransport(HttpTransportCore): + class AsyncHttpTransport(HttpTransportCore): # type: ignore def __init__(self: Self, options: Dict[str, Any]) -> None: super().__init__(options) # Requires event loop at init time From aaae195d417f820a09e18cd39128394ad5fd8287 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 17:04:56 +0200 Subject: [PATCH 098/102] ref(transport): Fix import checking GH-4582 --- sentry_sdk/transport.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 3049e88e8f..3f28b38d19 100644 --- a/sentry_sdk/transport.py +++ 
b/sentry_sdk/transport.py @@ -18,18 +18,18 @@ try: import httpcore +except ImportError: + httpcore = None # type: ignore + +try: import h2 # noqa: F401 - HTTP2_ENABLED = True + HTTP2_ENABLED = httpcore is not None except ImportError: HTTP2_ENABLED = False - httpcore = None # type: ignore try: - import httpcore # noqa: F401 - import anyio # noqa: F401 - - ASYNC_TRANSPORT_ENABLED = True + ASYNC_TRANSPORT_ENABLED = httpcore is not None except ImportError: ASYNC_TRANSPORT_ENABLED = False @@ -238,7 +238,11 @@ def _create_worker(self, options: dict[str, Any]) -> Worker: async_enabled = options.get("_experiments", {}).get("transport_async", False) try: asyncio.get_running_loop() - worker_cls = AsyncWorker if async_enabled else BackgroundWorker + worker_cls = ( + AsyncWorker + if async_enabled and ASYNC_TRANSPORT_ENABLED + else BackgroundWorker + ) except RuntimeError: worker_cls = BackgroundWorker return worker_cls(queue_size=options["transport_queue_size"]) From 6e2c4f60eb0f32143686dc558513e0983d7e2cc0 Mon Sep 17 00:00:00 2001 From: srothh Date: Wed, 30 Jul 2025 17:17:46 +0200 Subject: [PATCH 099/102] ref(client): Fix type checking with fallback asynctransport GH-4601 --- sentry_sdk/client.py | 8 ++++++-- sentry_sdk/transport.py | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index e209373cc2..df1f719470 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -959,7 +959,9 @@ async def _flush_and_close( await self._close_components_async() if self.transport is not None: - if isinstance(self.transport, AsyncHttpTransport): + if isinstance(self.transport, AsyncHttpTransport) and hasattr( + self.transport, "loop" + ): try: flush_task = self.transport.loop.create_task( @@ -996,7 +998,9 @@ def flush( # type: ignore[override] if timeout is None: timeout = self.options["shutdown_timeout"] - if isinstance(self.transport, AsyncHttpTransport): + if isinstance(self.transport, AsyncHttpTransport) and hasattr( + 
self.transport, "loop" + ): try: return self.transport.loop.create_task( self._flush_async(timeout, callback) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 3f28b38d19..d0ccf65e31 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -29,6 +29,8 @@ HTTP2_ENABLED = False try: + import anyio # noqa: F401 + ASYNC_TRANSPORT_ENABLED = httpcore is not None except ImportError: ASYNC_TRANSPORT_ENABLED = False From 9da7be818c0b97927556fdcacf0d94f43dce82fc Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 31 Jul 2025 12:34:57 +0200 Subject: [PATCH 100/102] ref(asyncio): Refactor loop close patch in asyncio integration GH-4601 --- sentry_sdk/integrations/asyncio.py | 85 +++++++++++----------- tests/integrations/asyncio/test_asyncio.py | 49 +++++++++++++ 2 files changed, 91 insertions(+), 43 deletions(-) diff --git a/sentry_sdk/integrations/asyncio.py b/sentry_sdk/integrations/asyncio.py index 3bcf135abd..5188e8efba 100644 --- a/sentry_sdk/integrations/asyncio.py +++ b/sentry_sdk/integrations/asyncio.py @@ -30,6 +30,47 @@ def get_name(coro: Any) -> str: ) +def patch_loop_close() -> None: + """Patch loop.close to flush pending events before shutdown.""" + # Atexit shutdown hook happens after the event loop is closed. + # Therefore, it is necessary to patch the loop.close method to ensure + # that pending events are flushed before the interpreter shuts down. 
+ try: + loop = asyncio.get_running_loop() + except RuntimeError: + # No running loop → cannot patch now + return + + if getattr(loop, "_sentry_flush_patched", False): + return + + async def _flush() -> None: + client = sentry_sdk.get_client() + if not client: + return + + try: + if not isinstance(client.transport, AsyncHttpTransport): + return + + task = client.close() # type: ignore + if task is not None: + await task + except Exception: + logger.warning("Sentry flush failed during loop shutdown", exc_info=True) + + orig_close = loop.close + + def _patched_close() -> None: + try: + loop.run_until_complete(_flush()) + finally: + orig_close() + + loop.close = _patched_close # type: ignore + loop._sentry_flush_patched = True # type: ignore + + def patch_asyncio() -> None: orig_task_factory = None try: @@ -125,46 +166,4 @@ class AsyncioIntegration(Integration): @staticmethod def setup_once() -> None: patch_asyncio() - - def _patch_loop_close() -> None: - # Atexit shutdown hook happens after the event loop is closed. - # Therefore, it is necessary to patch the loop.close method to ensure - # that pending events are flushed before the interpreter shuts down. 
- try: - loop = asyncio.get_running_loop() - except RuntimeError: - # No running loop → cannot patch now - return - - if getattr(loop, "_sentry_flush_patched", False): - return - - async def _flush() -> None: - client = sentry_sdk.get_client() - if not client: - return - try: - - if not isinstance(client.transport, AsyncHttpTransport): - return - - t = client.close() # type: ignore - if t is not None: - await t - except Exception: - logger.warning( - "Sentry flush failed during loop shutdown", exc_info=True - ) - - orig_close = loop.close - - def _patched_close() -> None: - try: - loop.run_until_complete(_flush()) - finally: - orig_close() - - loop.close = _patched_close # type: ignore - loop._sentry_flush_patched = True # type: ignore - - _patch_loop_close() + patch_loop_close() diff --git a/tests/integrations/asyncio/test_asyncio.py b/tests/integrations/asyncio/test_asyncio.py index 2ae71f8f43..5c329f8185 100644 --- a/tests/integrations/asyncio/test_asyncio.py +++ b/tests/integrations/asyncio/test_asyncio.py @@ -377,3 +377,52 @@ async def test_span_origin( assert event["contexts"]["trace"]["origin"] == "manual" assert event["spans"][0]["origin"] == "auto.function.asyncio" + + +@minimum_python_38 +def test_loop_close_patching(sentry_init): + sentry_init(integrations=[AsyncioIntegration()]) + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + with patch("asyncio.get_running_loop", return_value=loop): + assert not hasattr(loop, "_sentry_flush_patched") + AsyncioIntegration.setup_once() + assert hasattr(loop, "_sentry_flush_patched") + assert loop._sentry_flush_patched is True + + finally: + if not loop.is_closed(): + loop.close() + + +@minimum_python_38 +def test_loop_close_flushes_async_transport(sentry_init): + from sentry_sdk.transport import AsyncHttpTransport + from unittest.mock import Mock, AsyncMock + + sentry_init(integrations=[AsyncioIntegration()]) + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + with 
patch("asyncio.get_running_loop", return_value=loop): + AsyncioIntegration.setup_once() + + mock_client = Mock() + mock_transport = Mock(spec=AsyncHttpTransport) + mock_client.transport = mock_transport + mock_client.close = AsyncMock(return_value=None) + + with patch("sentry_sdk.get_client", return_value=mock_client): + loop.close() + + mock_client.close.assert_called_once() + mock_client.close.assert_awaited_once() + + except Exception: + if not loop.is_closed(): + loop.close() From 8a5ab067d7f5500dfc02ffdfa64c612e92f821bd Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 31 Jul 2025 13:50:32 +0200 Subject: [PATCH 101/102] ref(client): Split client flush into seperate function for readability GH-4601 --- sentry_sdk/client.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index df1f719470..e09316f3ee 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -995,9 +995,6 @@ def flush( # type: ignore[override] :param callback: Is invoked with the number of pending events and the configured timeout. 
""" if self.transport is not None: - if timeout is None: - timeout = self.options["shutdown_timeout"] - if isinstance(self.transport, AsyncHttpTransport) and hasattr( self.transport, "loop" ): @@ -1009,27 +1006,37 @@ def flush( # type: ignore[override] logger.warning("Event loop not running, aborting flush.") return None else: - self.session_flusher.flush() + self._flush_sync(timeout, callback) + return None - if self.log_batcher is not None: - self.log_batcher.flush() + def _flush_sync( + self, timeout: Optional[float], callback: Optional[Callable[[int, float], None]] + ) -> None: + """Synchronous flush implementation.""" + if timeout is None: + timeout = self.options["shutdown_timeout"] - self.transport.flush(timeout=timeout, callback=callback) - return None + self._flush_components() + if self.transport is not None: + self.transport.flush(timeout=timeout, callback=callback) async def _flush_async( self, timeout: Optional[float], callback: Optional[Callable[[int, float], None]] ) -> None: - + """Asynchronous flush implementation.""" if timeout is None: timeout = self.options["shutdown_timeout"] + self._flush_components() + if self.transport is not None: + flush_task = self.transport.flush(timeout=timeout, callback=callback) # type: ignore + if flush_task is not None: + await flush_task + + def _flush_components(self) -> None: self.session_flusher.flush() if self.log_batcher is not None: self.log_batcher.flush() - flush_task = self.transport.flush(timeout=timeout, callback=callback) # type: ignore - if flush_task is not None: - await flush_task def __enter__(self) -> _Client: return self From 295a0e9ef377b49ca31a31f1169933eb47341b91 Mon Sep 17 00:00:00 2001 From: srothh Date: Thu, 31 Jul 2025 14:01:15 +0200 Subject: [PATCH 102/102] ref(transport): Refactor async transport to be more aligned with sync GH-4582 --- sentry_sdk/transport.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/sentry_sdk/transport.py 
b/sentry_sdk/transport.py index 3f28b38d19..7f89b01849 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -620,15 +620,14 @@ def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]: async def _send_envelope(self: Self, envelope: Envelope) -> None: _prepared_envelope = self._prepare_envelope(envelope) - if _prepared_envelope is None: - return None - envelope, body, headers = _prepared_envelope - await self._send_request( - body.getvalue(), - headers=headers, - endpoint_type=EndpointType.ENVELOPE, - envelope=envelope, - ) + if _prepared_envelope is not None: + envelope, body, headers = _prepared_envelope + await self._send_request( + body.getvalue(), + headers=headers, + endpoint_type=EndpointType.ENVELOPE, + envelope=envelope, + ) return None async def _send_request( @@ -676,7 +675,7 @@ async def _request( # type: ignore[override] }, ) - def _flush_client_reports(self: Self, force: bool = False) -> None: + async def _flush_client_reports(self: Self, force: bool = False) -> None: client_report = self._fetch_pending_client_report(force=force, interval=60) if client_report is not None: self.capture_envelope(Envelope(items=[client_report])) @@ -685,7 +684,7 @@ async def _capture_envelope(self: Self, envelope: Envelope) -> None: async def send_envelope_wrapper() -> None: with capture_internal_exceptions(): await self._send_envelope(envelope) - self._flush_client_reports() + await self._flush_client_reports() if not self._worker.submit(send_envelope_wrapper): self.on_dropped_event("full_queue") @@ -711,9 +710,9 @@ def capture_envelope(self: Self, envelope: Envelope) -> None: else: # The event loop is no longer running logger.warning("Async Transport is not running in an event loop.") - self.on_dropped_event("no_async_context") + self.on_dropped_event("internal_sdk_error") for item in envelope.items: - self.record_lost_event("no_async_context", item=item) + self.record_lost_event("internal_sdk_error", item=item) def flush( # type: 
ignore[override] self: Self,