diff --git a/CHANGELOG.rst b/CHANGELOG.rst index efb07a459e..9fa18fa030 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -126,6 +126,12 @@ Added * Cherry-pick changes to runners.sh from st2-packages git repo. #6302 Cherry-picked by @cognifloyd +Added +~~~~~ +* Add a schema to shell and winrm runners to allow validations to be executed against their payloads. This means that the output of shell or winrm jobs + that return a JSON object will be parsed into a dict to allow for field validations to be performed. Non-JSON string responses are still + handled as normal. #6102 + 3.8.1 - December 13, 2023 ------------------------- Fixed diff --git a/Makefile b/Makefile index 84679bceda..382ef9220d 100644 --- a/Makefile +++ b/Makefile @@ -377,7 +377,7 @@ black: requirements .black-check echo "==========================================================="; \ echo "Running black on" $$component; \ echo "==========================================================="; \ - . $(VIRTUALENV_DIR)/bin/activate ; black --check --config pyproject.toml $$component/ || exit 1; \ + . $(VIRTUALENV_DIR)/bin/activate ; black --diff --check --config pyproject.toml $$component/ || exit 1; \ if [ -d "$$component/bin" ]; then \ . $(VIRTUALENV_DIR)/bin/activate ; black $$(grep -rl '^#!/.*python' $$component/bin) || exit 1; \ fi \ @@ -387,7 +387,7 @@ black: requirements .black-check echo "==========================================================="; \ echo "Running black on" $$component; \ echo "==========================================================="; \ - . $(VIRTUALENV_DIR)/bin/activate ; black --check --config pyproject.toml $$component/ || exit 1; \ + . $(VIRTUALENV_DIR)/bin/activate ; black --diff --check --config pyproject.toml $$component/ || exit 1; \ if [ -d "$$component/bin" ]; then \ . $(VIRTUALENV_DIR)/bin/activate ; black $$(grep -rl '^#!/.*python' $$component/bin) || exit 1; \ fi \ diff --git a/contrib/runners/local_runner/local_runner/runner.yaml b/contrib/runners/local_runner/local_runner/runner.yaml index 7463f46943..f6d97b835b 100644 --- a/contrib/runners/local_runner/local_runner/runner.yaml +++ b/contrib/runners/local_runner/local_runner/runner.yaml @@ -33,6 +33,27 @@ description: Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds. type: integer + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer - description: A runner to execute local actions as a fixed user. enabled: true name: local-shell-script @@ -74,3 +95,24 @@ description: Action timeout in seconds. Action will get killed if it doesn't finish in timeout seconds.
type: integer + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer \ No newline at end of file diff --git a/contrib/runners/orquesta_runner/tests/unit/test_rerun.py b/contrib/runners/orquesta_runner/tests/unit/test_rerun.py index 420b909e27..261f54812a 100644 --- a/contrib/runners/orquesta_runner/tests/unit/test_rerun.py +++ b/contrib/runners/orquesta_runner/tests/unit/test_rerun.py @@ -57,7 +57,7 @@ ) RUNNER_RESULT_SUCCEEDED = ( action_constants.LIVEACTION_STATUS_SUCCEEDED, - {"stdout": "foobar"}, + {"stdout": "foobar", "succeeded": True, "failed": False, "stderr": ""}, {}, ) diff --git a/contrib/runners/orquesta_runner/tests/unit/test_with_items.py b/contrib/runners/orquesta_runner/tests/unit/test_with_items.py index 8e8b67bd94..7a1d1ec549 100644 --- a/contrib/runners/orquesta_runner/tests/unit/test_with_items.py +++ b/contrib/runners/orquesta_runner/tests/unit/test_with_items.py @@ -56,13 +56,13 @@ RUNNER_RESULT_RUNNING = ( action_constants.LIVEACTION_STATUS_RUNNING, - {"stdout": "..."}, + {"stdout": "...", "stderr": ""}, {}, ) RUNNER_RESULT_SUCCEEDED = ( action_constants.LIVEACTION_STATUS_SUCCEEDED, - {"stdout": "..."}, + {"stdout": "...", "stderr": "", "succeeded": True, "failed": False}, {}, ) diff --git a/contrib/runners/remote_runner/remote_runner/runner.yaml b/contrib/runners/remote_runner/remote_runner/runner.yaml index 138973e218..414a5b3582 100644 --- a/contrib/runners/remote_runner/remote_runner/runner.yaml +++ b/contrib/runners/remote_runner/remote_runner/runner.yaml @@ -82,6 +82,27 @@ config is used. required: false type: string + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer - description: A remote execution runner that executes actions as a fixed system user. enabled: true name: remote-shell-script @@ -162,3 +183,24 @@ config is used. required: false type: string + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer \ No newline at end of file diff --git a/contrib/runners/winrm_runner/winrm_runner/runner.yaml b/contrib/runners/winrm_runner/winrm_runner/runner.yaml index 88a938c37f..6e5a9b94e4 100644 --- a/contrib/runners/winrm_runner/winrm_runner/runner.yaml +++ b/contrib/runners/winrm_runner/winrm_runner/runner.yaml @@ -66,6 +66,27 @@ CA bundle which comes from Mozilla. Verification using a custom CA bundle is not yet supported. Set to False to skip verification. 
type: boolean + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer - description: A remote execution runner that executes PowerShell commands via WinRM on a remote host enabled: true name: winrm-ps-cmd @@ -134,6 +155,27 @@ CA bundle which comes from Mozilla. Verification using a custom CA bundle is not yet supported. Set to False to skip verification. type: boolean + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer - description: A remote execution runner that executes PowerShell script via WinRM on a set of remote hosts enabled: true name: winrm-ps-script @@ -199,3 +241,24 @@ CA bundle which comes from Mozilla. Verification using a custom CA bundle is not yet supported. Set to False to skip verification. type: boolean + output_key: stdout + output_schema: + type: object + properties: + succeeded: + type: boolean + failed: + type: boolean + stdout: + anyOf: + - type: "object" + - type: "string" + - type: "integer" + - type: "number" + - type: "boolean" + - type: "array" + - type: "null" + stderr: + type: string + return_code: + type: integer \ No newline at end of file diff --git a/st2actions/tests/unit/test_policies.py b/st2actions/tests/unit/test_policies.py index 09a401e6b0..dedc285207 100644 --- a/st2actions/tests/unit/test_policies.py +++ b/st2actions/tests/unit/test_policies.py @@ -131,6 +131,43 @@ def test_apply(self): FakeConcurrencyApplicator.apply_after.assert_called_once_with(liveaction) RaiseExceptionApplicator.apply_after.assert_called_once_with(liveaction) + @mock.patch.object( + FakeConcurrencyApplicator, + "apply_before", + mock.MagicMock( + side_effect=FakeConcurrencyApplicator(None, None, threshold=3).apply_before + ), + ) + @mock.patch.object( + RaiseExceptionApplicator, + "apply_before", + mock.MagicMock(side_effect=RaiseExceptionApplicator(None, None).apply_before), + ) + @mock.patch.object( + FakeConcurrencyApplicator, + "apply_after", + mock.MagicMock( + side_effect=FakeConcurrencyApplicator(None, None, threshold=3).apply_after + ), + ) + @mock.patch.object( + RaiseExceptionApplicator, + "apply_after", + mock.MagicMock(side_effect=RaiseExceptionApplicator(None, None).apply_after), + ) + def test_apply_with_dict(self): + liveaction = LiveActionDB( + action="wolfpack.action-1", parameters={"actionstr": "dict_resp"} + ) + liveaction, _ = action_service.request(liveaction) + liveaction = self._wait_on_status( + liveaction, action_constants.LIVEACTION_STATUS_SUCCEEDED + ) + FakeConcurrencyApplicator.apply_before.assert_called_once_with(liveaction) + RaiseExceptionApplicator.apply_before.assert_called_once_with(liveaction) + FakeConcurrencyApplicator.apply_after.assert_called_once_with(liveaction) + RaiseExceptionApplicator.apply_after.assert_called_once_with(liveaction) + @mock.patch.object( FakeConcurrencyApplicator, "get_threshold", mock.MagicMock(return_value=0) ) diff --git a/st2api/tests/unit/controllers/v1/test_executions.py b/st2api/tests/unit/controllers/v1/test_executions.py index ea5e45b6fe..56595152fb 100644 --- 
a/st2api/tests/unit/controllers/v1/test_executions.py +++ b/st2api/tests/unit/controllers/v1/test_executions.py @@ -453,7 +453,12 @@ def test_get_one_max_result_size_query_parameter(self): # Update it with the result (this populates result and result size attributes) data = { - "result": {"fooo": "a" * 1000}, + "result": { + "stdout": {"fooo": "a" * 1000}, + "succeeded": True, + "failed": False, + "stderr": "", + }, "status": "succeeded", } actual_result_size = len(json_encode(data["result"])) @@ -1289,16 +1294,30 @@ def test_put_status_and_result(self): self.assertEqual(post_resp.status_int, 201) execution_id = self._get_actionexecution_id(post_resp) - updates = {"status": "succeeded", "result": {"stdout": "foobar"}} + updates = { + "status": "succeeded", + "result": { + "stdout": "foobar", + "succeeded": True, + "failed": False, + "stderr": "", + }, + } put_resp = self._do_put(execution_id, updates) self.assertEqual(put_resp.status_int, 200) self.assertEqual(put_resp.json["status"], "succeeded") - self.assertDictEqual(put_resp.json["result"], {"stdout": "foobar"}) + self.assertDictEqual( + put_resp.json["result"], + {"stdout": "foobar", "succeeded": True, "failed": False, "stderr": ""}, + ) get_resp = self._do_get_one(execution_id) self.assertEqual(get_resp.status_int, 200) self.assertEqual(get_resp.json["status"], "succeeded") - self.assertDictEqual(get_resp.json["result"], {"stdout": "foobar"}) + self.assertDictEqual( + get_resp.json["result"], + {"stdout": "foobar", "succeeded": True, "failed": False, "stderr": ""}, + ) def test_put_bad_state(self): post_resp = self._do_post(LIVE_ACTION_1) @@ -1337,11 +1356,22 @@ def test_put_status_to_completed_execution(self): self.assertEqual(post_resp.status_int, 201) execution_id = self._get_actionexecution_id(post_resp) - updates = {"status": "succeeded", "result": {"stdout": "foobar"}} + updates = { + "status": "succeeded", + "result": { + "stdout": "foobar", + "succeeded": True, + "failed": False, + "stderr": "", + }, + } put_resp = self._do_put(execution_id, updates) self.assertEqual(put_resp.status_int, 200) self.assertEqual(put_resp.json["status"], "succeeded") - self.assertDictEqual(put_resp.json["result"], {"stdout": "foobar"}) + self.assertDictEqual( + put_resp.json["result"], + {"stdout": "foobar", "succeeded": True, "failed": False, "stderr": ""}, + ) updates = {"status": "abandoned"} put_resp = self._do_put(execution_id, updates, expect_errors=True) @@ -1353,7 +1383,15 @@ def test_put_execution_missing_liveaction(self): self.assertEqual(post_resp.status_int, 201) execution_id = self._get_actionexecution_id(post_resp) - updates = {"status": "succeeded", "result": {"stdout": "foobar"}} + updates = { + "status": "succeeded", + "result": { + "stdout": "foobar", + "succeeded": True, + "failed": False, + "stderr": "", + }, + } put_resp = self._do_put(execution_id, updates, expect_errors=True) self.assertEqual(put_resp.status_int, 500) @@ -1620,22 +1658,39 @@ def test_get_raw_result(self): execution_id = self._get_actionexecution_id(get_resp) updates = { "status": "succeeded", - "result": {"stdout": "foobar", "stderr": "barfoo"}, + "result": { + "stdout": "foobar", + "stderr": "barfoo", + "succeeded": True, + "failed": False, + }, } put_resp = self._do_put(execution_id, updates) self.assertEqual(put_resp.status_int, 200) self.assertEqual(put_resp.json["status"], "succeeded") self.assertDictEqual( - put_resp.json["result"], {"stdout": "foobar", "stderr": "barfoo"} + put_resp.json["result"], + { + "stdout": "foobar", + "stderr": "barfoo", + 
"succeeded": True, + "failed": False, + }, ) self.assertEqual( - put_resp.json["result_size"], len('{"stdout":"foobar","stderr":"barfoo"}') + put_resp.json["result_size"], + len( + '{"stdout":"foobar","stderr":"barfoo","succeeded":true,"failed":false}' + ), ) # 1. download=False, compress=False, pretty_format=False get_resp = self.app.get("/v1/executions/%s/result" % (execution_id)) self.assertEqual(get_resp.headers["Content-Type"], "text/json") - self.assertEqual(get_resp.body, b'{"stdout":"foobar","stderr":"barfoo"}') + self.assertEqual( + get_resp.body, + b'{"stdout":"foobar","stderr":"barfoo","succeeded":true,"failed":false}', + ) # 2. download=False, compress=False, pretty_format=True get_resp = self.app.get( @@ -1644,7 +1699,9 @@ def test_get_raw_result(self): expected_result = b""" { "stdout": "foobar", - "stderr": "barfoo" + "stderr": "barfoo", + "succeeded": true, + "failed": false }""".strip() self.assertEqual(get_resp.headers["Content-Type"], "text/json") self.assertEqual(get_resp.body, expected_result) @@ -1653,7 +1710,10 @@ def test_get_raw_result(self): # NOTE: webtest auto decompresses the result get_resp = self.app.get("/v1/executions/%s/result?compress=1" % (execution_id)) self.assertEqual(get_resp.headers["Content-Type"], "application/x-gzip") - self.assertEqual(get_resp.body, b'{"stdout":"foobar","stderr":"barfoo"}') + self.assertEqual( + get_resp.body, + b'{"stdout":"foobar","stderr":"barfoo","succeeded":true,"failed":false}', + ) # 4. download=True, compress=False, pretty_format=False get_resp = self.app.get("/v1/executions/%s/result?download=1" % (execution_id)) @@ -1662,7 +1722,10 @@ def test_get_raw_result(self): get_resp.headers["Content-Disposition"], "attachment; filename=execution_%s_result.json" % (execution_id), ) - self.assertEqual(get_resp.body, b'{"stdout":"foobar","stderr":"barfoo"}') + self.assertEqual( + get_resp.body, + b'{"stdout":"foobar","stderr":"barfoo","succeeded":true,"failed":false}', + ) # 5. 
download=True, compress=False, pretty_format=True get_resp = self.app.get( @@ -1671,7 +1734,9 @@ def test_get_raw_result(self): expected_result = b""" { "stdout": "foobar", - "stderr": "barfoo" + "stderr": "barfoo", + "succeeded": true, + "failed": false }""".strip() self.assertEqual(get_resp.headers["Content-Type"], "text/json") @@ -1689,7 +1754,9 @@ def test_get_raw_result(self): expected_result = b""" { "stdout": "foobar", - "stderr": "barfoo" + "stderr": "barfoo", + "succeeded": true, + "failed": false }""".strip() self.assertEqual(get_resp.headers["Content-Type"], "application/x-gzip") @@ -1782,7 +1849,13 @@ def test_get_single_attribute_success(self): data = {} data["status"] = action_constants.LIVEACTION_STATUS_SUCCEEDED - data["result"] = {"foo": "bar"} + data["result"] = { + "foo": "bar", + "stdout": "hello world", + "stderr": "", + "succeeded": True, + "failed": False, + } resp = self.app.put_json("/v1/executions/%s" % (exec_id), data) self.assertEqual(resp.status_int, 200) diff --git a/st2common/st2common/util/schema/__init__.py b/st2common/st2common/util/schema/__init__.py index f6b4103b30..2b8f118f33 100644 --- a/st2common/st2common/util/schema/__init__.py +++ b/st2common/st2common/util/schema/__init__.py @@ -419,9 +419,16 @@ def validate( :type use_default: ``bool`` """ instance = fast_deepcopy_dict(instance) - schema_type = schema.get("type", None) + schema_type = None + if isinstance(schema, dict): + schema_type = schema.get("type", None) + instance_is_dict = isinstance(instance, dict) + if isinstance(schema, dict) and ("type" in schema or "$schema" in schema): + jsonschema.validate(instance=instance, schema=schema, *args, **kwargs) + return instance + if use_default and allow_default_none: schema = modify_schema_allow_default_none(schema=schema) diff --git a/st2common/tests/unit/services/test_workflow_rerun.py b/st2common/tests/unit/services/test_workflow_rerun.py index c91e140d6c..0aa5cbaad5 100644 --- a/st2common/tests/unit/services/test_workflow_rerun.py +++ b/st2common/tests/unit/services/test_workflow_rerun.py @@ -51,7 +51,7 @@ RUNNER_RESULT_FAILED = (action_constants.LIVEACTION_STATUS_FAILED, {}, {}) RUNNER_RESULT_SUCCEEDED = ( action_constants.LIVEACTION_STATUS_SUCCEEDED, - {"stdout": "foobar"}, + {"stdout": "foobar", "succeeded": True, "failed": False, "stderr": ""}, {}, ) diff --git a/st2tests/st2tests/mocks/runners/runner.py b/st2tests/st2tests/mocks/runners/runner.py index b89b75b712..0a6603c537 100644 --- a/st2tests/st2tests/mocks/runners/runner.py +++ b/st2tests/st2tests/mocks/runners/runner.py @@ -29,7 +29,6 @@ def get_runner(config=None): class MockActionRunner(ActionRunner): def __init__(self): super(MockActionRunner, self).__init__(runner_id="1") - self.pre_run_called = False self.run_called = False self.post_run_called = False @@ -45,7 +44,17 @@ def run(self, action_params): if self.runner_parameters.get("raise", False): raise Exception("Raise required.") - default_result = {"ran": True, "action_params": action_params} + default_result = { + "ran": True, + "action_params": action_params, + "failed": False, + "stdout": "res", + "stderr": "", + "succeeded": True, + } + if action_params.get("actionstr", "") == "dict_resp": + default_result["stdout"] = {"key": "value", "key2": {"sk1": "v1"}} + default_context = {"third_party_system": {"ref_id": "1234"}} status = self.runner_parameters.get("mock_status", LIVEACTION_STATUS_SUCCEEDED)
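Note on the runner.yaml hunks above: they give the local, remote, and WinRM runners an output_key of stdout plus a permissive output_schema, so a JSON-object response can be validated field by field while plain-text output remains valid. A minimal sketch of that validation, calling the jsonschema package directly rather than the st2common wrapper, with the schema transcribed from this diff:

```python
# Illustrative only: validate a runner result against the output_schema that
# this diff adds to the shell/WinRM runner.yaml files. Requires "jsonschema".
import jsonschema

OUTPUT_SCHEMA = {
    "type": "object",
    "properties": {
        "succeeded": {"type": "boolean"},
        "failed": {"type": "boolean"},
        "stdout": {
            # stdout may be any JSON type, so both parsed objects and raw
            # strings pass schema validation.
            "anyOf": [
                {"type": "object"},
                {"type": "string"},
                {"type": "integer"},
                {"type": "number"},
                {"type": "boolean"},
                {"type": "array"},
                {"type": "null"},
            ]
        },
        "stderr": {"type": "string"},
        "return_code": {"type": "integer"},
    },
}

# Both a parsed JSON object and plain text are valid stdout values.
for stdout in ({"key": "value", "key2": {"sk1": "v1"}}, "plain text output"):
    result = {
        "succeeded": True,
        "failed": False,
        "return_code": 0,
        "stderr": "",
        "stdout": stdout,
    }
    # Raises jsonschema.ValidationError if the result does not match.
    jsonschema.validate(instance=result, schema=OUTPUT_SCHEMA)
```

This mirrors the short-circuit added in st2common/util/schema/__init__.py above, where validate() delegates straight to jsonschema.validate and returns the instance whenever the schema dict carries a "type" or "$schema" key.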
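The changelog entry describes stdout from shell/WinRM jobs that return a JSON object being parsed into a dict before field validations run, with non-JSON output handled as before. A hedged sketch of that behaviour follows; parse_stdout is a hypothetical helper used for illustration only and is not the runner implementation, which is outside this excerpt.

```python
# Hypothetical helper illustrating the behaviour described in the changelog:
# JSON-object stdout becomes a dict so field validations can run against it,
# while non-JSON output is passed through untouched.
import json
from typing import Any


def parse_stdout(raw_stdout: str) -> Any:
    try:
        parsed = json.loads(raw_stdout)
    except (ValueError, TypeError):
        return raw_stdout  # non-JSON responses are handled as before
    # Other JSON types (numbers, booleans, arrays) also come back as their
    # Python equivalents, which the anyOf stdout schema still allows.
    return parsed


assert parse_stdout('{"key": "value"}') == {"key": "value"}
assert parse_stdout("hello world") == "hello world"
```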