Add support for partial response messages #101
base: dev
Changes from all commits: 0c2245c, 34e2a84, 162510a, 7b3d4d8, d1a8ec9
neon_mq_connector/utils/client_utils.py

@@ -29,6 +29,8 @@
 import uuid

 from threading import Event
+from typing import Callable, Optional
+
 from pika.channel import Channel
 from pika.exceptions import ProbableAccessDeniedError, StreamLostError
 from neon_mq_connector.connector import MQConnector
@@ -60,16 +62,20 @@ def __init__(self, config: dict, service_name: str, vhost: str):

 def send_mq_request(vhost: str, request_data: dict, target_queue: str,
                     response_queue: str = None, timeout: int = 30,
-                    expect_response: bool = True) -> dict:
+                    expect_response: bool = True,
+                    stream_callback: Optional[Callable[[dict], None]] = None) -> dict:
     """
     Sends a request to the MQ server and returns the response.
     :param vhost: vhost to target
     :param request_data: data to post to target_queue
     :param target_queue: queue to post request to
     :param response_queue: optional queue to monitor for a response.
         Generally should be blank
-    :param timeout: time in seconds to wait for a response before timing out
+    :param timeout: time in seconds to wait for a complete response before
+        timing out. Note that in the event of a timeout, a partial response may
+        have been handled by `stream_callback`.
     :param expect_response: boolean indicating whether a response is expected
+    :param stream_callback: Optional function to pass partial responses to
     :return: response to request
     """
     response_queue = response_queue or uuid.uuid4().hex
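For reference, a caller might use the new `stream_callback` parameter like this. This is a minimal sketch, not part of the diff: the vhost and queue names are hypothetical, and it assumes the target service emits multi-part responses as handled below.

```python
from neon_mq_connector.utils.client_utils import send_mq_request

def on_partial_response(part: dict):
    # Called once per response part, in arrival order
    print(f"Part {part.get('_part')}: {part.get('response')}")

# Blocks until a response flagged `_is_final` arrives (or `timeout` elapses);
# every part, including the final one, is also passed to `stream_callback`.
response = send_mq_request(vhost="/hypothetical_vhost",
                           request_data={"data": "example"},
                           target_queue="hypothetical_input_queue",
                           stream_callback=on_partial_response)
print(response.get("response"))
```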
@@ -94,22 +100,31 @@ def handle_mq_response(channel: Channel, method, _, body):
         """
         api_output = b64_to_dict(body)

         # The Messagebus connector generates a unique `message_id` for each
         # response message. Check context for the original one; otherwise,
         # check in output directly as some APIs emit responses without a unique
         # message_id
         # Backwards-compat. handles `context` in response for raw `Message`
         # objects sent across the MQ bus
         api_output_msg_id = \
             api_output.get('context',
                            api_output).get('mq', api_output).get('message_id')
-        # TODO: One of these specs should be deprecated
         if api_output_msg_id != api_output.get('message_id'):
-            LOG.debug(f"Handling message_id from response context")
+            # TODO: `context.mq` handling should be deprecated
+            LOG.warning(f"Handling message_id from response context")
         if api_output_msg_id == message_id:
             LOG.debug(f'MQ output: {api_output}')
             channel.basic_ack(delivery_tag=method.delivery_tag)
-            channel.close()
-            response_data.update(api_output)
-            response_event.set()
+            if isinstance(api_output.get('_part'), int):
+                # TODO: Consider forcing these to be passed to `stream_callback` synchronously
+                # Handle multi-part responses
+                if stream_callback:
+                    # Pass each part to the stream callback method if defined
+                    stream_callback(api_output)
+                if api_output.get('_is_final'):
+                    # Always return final result
+                    response_data.update(api_output)
+            else:
+                response_data.update(api_output)
+            if api_output.get('_is_final', True):
+                channel.close()
+                response_event.set()
         else:
             channel.basic_nack(delivery_tag=method.delivery_tag)
             LOG.debug(f"Ignoring {api_output_msg_id} waiting for {message_id}")

Review discussion:

Reviewer: Maybe we could try to achieve this behaviour using Streams? https://www.rabbitmq.com/docs/streams

Author: I looked into Streams briefly, and they seemed more oriented toward sending larger quantities of data or broadcasting to multiple recipients, based on the documented RabbitMQ use cases. I opted for this method because it is simpler to implement, with fewer changes to our client helpers.

Reviewer: Does this mean that objects can't be sent chunk by chunk, but only as a complete object with partial additions to its content?

Author: That is the intended use case, as documented in the README changes. I implemented the stream callback as optional, so […] Chunked responses would require specifying a standard method for combining them into a single complete response, which is also valid, but would require some additional information in responses, i.e.: […]
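To make the handler's contract concrete, here is a sketch of the message sequence a producer would emit under the cumulative protocol discussed above. The payload values are invented, but the `_part` and `_is_final` keys are the ones checked by `handle_mq_response`.

```python
# Hypothetical three-part response to a single request. Parts are
# cumulative: each part repeats all prior content and adds more.
parts = [
    {"message_id": "abc123", "response": "Hello", "_part": 0, "_is_final": False},
    {"message_id": "abc123", "response": "Hello wor", "_part": 1, "_is_final": False},
    {"message_id": "abc123", "response": "Hello world", "_part": 2, "_is_final": True},
]

for part in parts:
    # Each part would be passed to `stream_callback` as it arrives
    print(part["_part"], part["response"])

# Only the part flagged `_is_final` becomes the return value of
# `send_mq_request`, per the handler logic above
final = next(p for p in parts if p["_is_final"])
assert final["response"] == "Hello world"
```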
tests/test_client_utils.py
@@ -109,6 +109,28 @@ def respond(channel, method, _, body):
                               properties=pika.BasicProperties(expiration='1000'))
         channel.basic_ack(delivery_tag=method.delivery_tag)

+    @staticmethod
+    def respond_multiple(channel, method, _, body):
+        request = b64_to_dict(body)
+        num_parts = request.get("num_parts", 3)
+        base_response = {"message_id": request["message_id"],
+                         "success": True,
+                         "request_data": request["data"]}
+        reply_channel = request.get("routing_key")
+        channel.queue_declare(queue=reply_channel)
+        response_text = ""
+        for i in range(num_parts):
+            response_text += f" {i}"
+            response = {**base_response, **{"response": response_text,
+                                            "_part": i,
+                                            "_is_final": i == num_parts - 1}}
+            channel.basic_publish(exchange='',
+                                  routing_key=reply_channel,
+                                  body=dict_to_b64(response),
+                                  properties=pika.BasicProperties(expiration='1000'))
+            time.sleep(0.5)  # Used to ensure synchronous response handling
+
+        channel.basic_ack(delivery_tag=method.delivery_tag)
+

 @pytest.mark.usefixtures("rmq_instance")
 class TestClientUtils(unittest.TestCase):

Review discussion (on the `time.sleep(0.5)` call):

Author: This is currently required because nothing accounts for responses arriving out of order.

Reviewer: This is very bad; if RabbitMQ does not guarantee order of delivery, a sleep isn't a reliable solution.

Author: This is how I specified it in the documentation: each response should be cumulative rather than incremental (response 2 includes all of response 1 and more). The one with the […] Each message does have an index, so we could enforce on the client side that the callback receives the messages in order.
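Following up on the ordering discussion above: since each message carries a `_part` index, one client-side option (not implemented in this PR) is to buffer out-of-order parts and release them to the callback sequentially. A rough sketch, with a hypothetical wrapper name:

```python
from typing import Callable

def make_ordered_callback(callback: Callable[[dict], None]) -> Callable[[dict], None]:
    """Wrap `callback` so parts are delivered strictly in `_part` order."""
    buffered = {}   # parts that arrived early, keyed by `_part` index
    next_part = 0   # index of the next part to release

    def ordered(part: dict):
        nonlocal next_part
        buffered[part.get('_part', 0)] = part
        # Release any consecutive run of buffered parts
        while next_part in buffered:
            callback(buffered.pop(next_part))
            next_part += 1

    return ordered
```

With something like this on the client side, the `time.sleep(0.5)` in `respond_multiple` could likely be dropped, since the test would no longer depend on in-order network delivery.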
@@ -132,6 +154,11 @@ def setUp(self) -> None:
                                               INPUT_CHANNEL,
                                               self.test_connector.respond,
                                               auto_ack=False)
+        self.test_connector.register_consumer("neon_utils_test_multi",
+                                              vhost,
+                                              f"{INPUT_CHANNEL}-multi",
+                                              self.test_connector.respond_multiple,
+                                              auto_ack=False)
         self.test_connector.run_consumers()

     @classmethod
@@ -156,6 +183,27 @@ def test_send_mq_request_spec_output_channel_valid(self):
         self.assertTrue(response["success"])
         self.assertEqual(response["request_data"], request["data"])

+    def test_multi_part_mq_response(self):
+        from neon_mq_connector.utils.client_utils import send_mq_request
+        request = {"data": time.time(),
+                   "num_parts": 5}
+        target_queue = f"{INPUT_CHANNEL}-multi"
+        stream_callback = Mock()
+        response = send_mq_request("/neon_testing", request, target_queue,
+                                   stream_callback=stream_callback)
+
+        self.assertEqual(stream_callback.call_count, request['num_parts'],
+                         stream_callback.call_args_list)
+
+        self.assertIsInstance(response, dict, response)
+        self.assertTrue(response.get("success"), response)
+        self.assertEqual(response["request_data"], request["data"])
+        self.assertEqual(len(response['response'].split()), request['num_parts'])
+        self.assertTrue(response['_is_final'])
+
+        # Last callback is the same as the standard response
+        self.assertEqual(response, stream_callback.call_args[0][0])
+
     def test_multiple_mq_requests(self):
         from neon_mq_connector.utils.client_utils import send_mq_request
         responses = dict()