diff --git a/backend/.sqlx/query-04ce5c530c80ae6f911dfe0dc9ed7d1a2e10342bbbc7f8486df0b73f5657a493.json b/backend/.sqlx/query-04ce5c530c80ae6f911dfe0dc9ed7d1a2e10342bbbc7f8486df0b73f5657a493.json new file mode 100644 index 0000000000000..2878e549203b4 --- /dev/null +++ b/backend/.sqlx/query-04ce5c530c80ae6f911dfe0dc9ed7d1a2e10342bbbc7f8486df0b73f5657a493.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_job", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "04ce5c530c80ae6f911dfe0dc9ed7d1a2e10342bbbc7f8486df0b73f5657a493" +} diff --git a/backend/.sqlx/query-08e5832c4002a0970d4105bb80371dcc58cb037a59afe08cd1372b51791b4165.json b/backend/.sqlx/query-08e5832c4002a0970d4105bb80371dcc58cb037a59afe08cd1372b51791b4165.json new file mode 100644 index 0000000000000..2fdaa1a63d04a --- /dev/null +++ b/backend/.sqlx/query-08e5832c4002a0970d4105bb80371dcc58cb037a59afe08cd1372b51791b4165.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_job_completed", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "08e5832c4002a0970d4105bb80371dcc58cb037a59afe08cd1372b51791b4165" +} diff --git a/backend/.sqlx/query-1c047fef05e8cfc07aef7ea9a5454334eb7375b483e8ffd80364c60c3aad04b4.json b/backend/.sqlx/query-1c047fef05e8cfc07aef7ea9a5454334eb7375b483e8ffd80364c60c3aad04b4.json new file mode 100644 index 0000000000000..7162b07befa79 --- /dev/null +++ b/backend/.sqlx/query-1c047fef05e8cfc07aef7ea9a5454334eb7375b483e8ffd80364c60c3aad04b4.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM app_version WHERE app_id = '2'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "1c047fef05e8cfc07aef7ea9a5454334eb7375b483e8ffd80364c60c3aad04b4" +} diff --git a/backend/.sqlx/query-1c739fddea33331fdb5acbfe614dc18a4db630616b3aa80c27b54e4bb4e20f30.json b/backend/.sqlx/query-1c739fddea33331fdb5acbfe614dc18a4db630616b3aa80c27b54e4bb4e20f30.json new file mode 100644 index 0000000000000..3d444ae09b423 --- /dev/null +++ b/backend/.sqlx/query-1c739fddea33331fdb5acbfe614dc18a4db630616b3aa80c27b54e4bb4e20f30.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT runnable_id FROM v2_job ORDER BY created_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "runnable_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true + ] + }, + "hash": "1c739fddea33331fdb5acbfe614dc18a4db630616b3aa80c27b54e4bb4e20f30" +} diff --git a/backend/.sqlx/query-3528fbb71569b9bfd2a66d0692f9f0d1ecabb750d3d508f5c6a5cb3eaf3cd209.json b/backend/.sqlx/query-3528fbb71569b9bfd2a66d0692f9f0d1ecabb750d3d508f5c6a5cb3eaf3cd209.json new file mode 100644 index 0000000000000..fbeff75f564e7 --- /dev/null +++ b/backend/.sqlx/query-3528fbb71569b9bfd2a66d0692f9f0d1ecabb750d3d508f5c6a5cb3eaf3cd209.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT versions FROM flow WHERE path = 'f/dre/flow'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "versions", + "type_info": "Int8Array" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": 
"3528fbb71569b9bfd2a66d0692f9f0d1ecabb750d3d508f5c6a5cb3eaf3cd209" +} diff --git a/backend/.sqlx/query-3af3c1080cde18cfbeb08e290f63b465b4bbf83bc86c0efacd6133bb432ee13d.json b/backend/.sqlx/query-3af3c1080cde18cfbeb08e290f63b465b4bbf83bc86c0efacd6133bb432ee13d.json new file mode 100644 index 0000000000000..623fab4416333 --- /dev/null +++ b/backend/.sqlx/query-3af3c1080cde18cfbeb08e290f63b465b4bbf83bc86c0efacd6133bb432ee13d.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT (scheduled_for - created_at) FROM v2_job_queue WHERE running = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Interval" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "3af3c1080cde18cfbeb08e290f63b465b4bbf83bc86c0efacd6133bb432ee13d" +} diff --git a/backend/.sqlx/query-421c7e0388326889b33fae4c8fa2fd7cb4a7be6dc50c9f7507eaa15f138484dc.json b/backend/.sqlx/query-421c7e0388326889b33fae4c8fa2fd7cb4a7be6dc50c9f7507eaa15f138484dc.json new file mode 100644 index 0000000000000..8d828b265add2 --- /dev/null +++ b/backend/.sqlx/query-421c7e0388326889b33fae4c8fa2fd7cb4a7be6dc50c9f7507eaa15f138484dc.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM flow_version WHERE path = 'f/dre/flow'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "421c7e0388326889b33fae4c8fa2fd7cb4a7be6dc50c9f7507eaa15f138484dc" +} diff --git a/backend/.sqlx/query-44d3e7dce67967471fa638f2beedf8567579218d5c4332b01e169732c836ebdc.json b/backend/.sqlx/query-44d3e7dce67967471fa638f2beedf8567579218d5c4332b01e169732c836ebdc.json new file mode 100644 index 0000000000000..41c08565b0d83 --- /dev/null +++ b/backend/.sqlx/query-44d3e7dce67967471fa638f2beedf8567579218d5c4332b01e169732c836ebdc.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT hash FROM script WHERE path = 'f/dre_script/script' AND archived = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hash", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "44d3e7dce67967471fa638f2beedf8567579218d5c4332b01e169732c836ebdc" +} diff --git a/backend/.sqlx/query-47a55edc0f5c54ac9f5e48665c8dafc044d1861048857ec2fc70f929ab358373.json b/backend/.sqlx/query-47a55edc0f5c54ac9f5e48665c8dafc044d1861048857ec2fc70f929ab358373.json new file mode 100644 index 0000000000000..7876a6f7f02f4 --- /dev/null +++ b/backend/.sqlx/query-47a55edc0f5c54ac9f5e48665c8dafc044d1861048857ec2fc70f929ab358373.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM v2_job_queue WHERE running = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "47a55edc0f5c54ac9f5e48665c8dafc044d1861048857ec2fc70f929ab358373" +} diff --git a/backend/.sqlx/query-53b236c65e8790b7474e919daa0583902596c4faed6d83e668be85aba8eb644a.json b/backend/.sqlx/query-53b236c65e8790b7474e919daa0583902596c4faed6d83e668be85aba8eb644a.json new file mode 100644 index 0000000000000..d567e90159792 --- /dev/null +++ b/backend/.sqlx/query-53b236c65e8790b7474e919daa0583902596c4faed6d83e668be85aba8eb644a.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) from debounce_key", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + 
"type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "53b236c65e8790b7474e919daa0583902596c4faed6d83e668be85aba8eb644a" +} diff --git a/backend/.sqlx/query-5848d416e2d96eb20c7417dec608f8ca36db091c67cf9ec5e7cec40486532ab9.json b/backend/.sqlx/query-5848d416e2d96eb20c7417dec608f8ca36db091c67cf9ec5e7cec40486532ab9.json new file mode 100644 index 0000000000000..ae9532cdeee4e --- /dev/null +++ b/backend/.sqlx/query-5848d416e2d96eb20c7417dec608f8ca36db091c67cf9ec5e7cec40486532ab9.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_job_queue WHERE running = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "5848d416e2d96eb20c7417dec608f8ca36db091c67cf9ec5e7cec40486532ab9" +} diff --git a/backend/.sqlx/query-5bedbfe98b3cacd1c6f4b2344a9a626245abe58c306392a76710252d16d0bd44.json b/backend/.sqlx/query-5bedbfe98b3cacd1c6f4b2344a9a626245abe58c306392a76710252d16d0bd44.json new file mode 100644 index 0000000000000..a7733f54035ef --- /dev/null +++ b/backend/.sqlx/query-5bedbfe98b3cacd1c6f4b2344a9a626245abe58c306392a76710252d16d0bd44.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT versions FROM app WHERE path = 'f/dre_app/app'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "versions", + "type_info": "Int8Array" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "5bedbfe98b3cacd1c6f4b2344a9a626245abe58c306392a76710252d16d0bd44" +} diff --git a/backend/.sqlx/query-6641d63a691d712bc24be9c972646fed29a76e579e72d6c8539cd0de73b424db.json b/backend/.sqlx/query-6641d63a691d712bc24be9c972646fed29a76e579e72d6c8539cd0de73b424db.json new file mode 100644 index 0000000000000..a193aa5b98235 --- /dev/null +++ b/backend/.sqlx/query-6641d63a691d712bc24be9c972646fed29a76e579e72d6c8539cd0de73b424db.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM debounce_stale_data", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "6641d63a691d712bc24be9c972646fed29a76e579e72d6c8539cd0de73b424db" +} diff --git a/backend/.sqlx/query-b5fbd7893950610f1285662df24f438c9855ba860e23befd88c2544ef86e9133.json b/backend/.sqlx/query-69550451b86f221a3d2ef626be7073fc77421710992b10f526aa36bc64ff0930.json similarity index 60% rename from backend/.sqlx/query-b5fbd7893950610f1285662df24f438c9855ba860e23befd88c2544ef86e9133.json rename to backend/.sqlx/query-69550451b86f221a3d2ef626be7073fc77421710992b10f526aa36bc64ff0930.json index 766748fa603ac..caf6213fa2f85 100644 --- a/backend/.sqlx/query-b5fbd7893950610f1285662df24f438c9855ba860e23befd88c2544ef86e9133.json +++ b/backend/.sqlx/query-69550451b86f221a3d2ef626be7073fc77421710992b10f526aa36bc64ff0930.json @@ -1,17 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO script\n (workspace_id, hash, path, parent_hashes, summary, description, content, created_by, schema, is_template, extra_perms, lock, language, kind, tag, draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets)\n\n SELECT 
workspace_id, $1, path, array_prepend($2::bigint, COALESCE(parent_hashes, '{}'::bigint[])), summary, description, content, created_by, schema, is_template, extra_perms, $4, language, kind, tag, draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets\n\n FROM script WHERE hash = $2 AND workspace_id = $3;\n ", + "query": "\n INSERT INTO script\n (workspace_id, hash, path, parent_hashes, summary, description, content, created_by, schema, is_template, extra_perms, lock, language, kind, tag, draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets)\n\n SELECT workspace_id, $1, path, array_prepend($2::bigint, COALESCE(parent_hashes, '{}'::bigint[])), summary, description, content, created_by, schema, is_template, extra_perms, NULL, language, kind, tag, draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets\n\n FROM script WHERE hash = $2 AND workspace_id = $3;\n ", "describe": { "columns": [], "parameters": { "Left": [ "Int8", "Int8", - "Text", "Text" ] }, "nullable": [] }, - "hash": "b5fbd7893950610f1285662df24f438c9855ba860e23befd88c2544ef86e9133" + "hash": "69550451b86f221a3d2ef626be7073fc77421710992b10f526aa36bc64ff0930" } diff --git a/backend/.sqlx/query-720d7f0d258d52a6b89e2f4b32ea0fd360da8a9f06e0ab59d9e724851ced4247.json b/backend/.sqlx/query-720d7f0d258d52a6b89e2f4b32ea0fd360da8a9f06e0ab59d9e724851ced4247.json new file mode 100644 index 0000000000000..8239f32a08e6f --- /dev/null +++ b/backend/.sqlx/query-720d7f0d258d52a6b89e2f4b32ea0fd360da8a9f06e0ab59d9e724851ced4247.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) from debounce_stale_data", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "720d7f0d258d52a6b89e2f4b32ea0fd360da8a9f06e0ab59d9e724851ced4247" +} diff --git a/backend/.sqlx/query-73fcf81d272c1613e094d60c0a088f9d694bc37caacef7269e3738de8b5f6013.json b/backend/.sqlx/query-73fcf81d272c1613e094d60c0a088f9d694bc37caacef7269e3738de8b5f6013.json new file mode 100644 index 0000000000000..00f7ec50d200b --- /dev/null +++ b/backend/.sqlx/query-73fcf81d272c1613e094d60c0a088f9d694bc37caacef7269e3738de8b5f6013.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT key FROM debounce_key WHERE job_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "key", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "73fcf81d272c1613e094d60c0a088f9d694bc37caacef7269e3738de8b5f6013" +} diff --git a/backend/.sqlx/query-76774e6f72c8c8b7473487e4176dc17b17372b7292e39d3888a93ff4fe49e4f5.json b/backend/.sqlx/query-76774e6f72c8c8b7473487e4176dc17b17372b7292e39d3888a93ff4fe49e4f5.json new file mode 100644 
index 0000000000000..694ed1887fdfa --- /dev/null +++ b/backend/.sqlx/query-76774e6f72c8c8b7473487e4176dc17b17372b7292e39d3888a93ff4fe49e4f5.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM debounce_key WHERE key = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [] + }, + "hash": "76774e6f72c8c8b7473487e4176dc17b17372b7292e39d3888a93ff4fe49e4f5" +} diff --git a/backend/.sqlx/query-77f33de1d95e38a44968ea7f026476226c66fe42c9f992bd4c6c232a68ade2bd.json b/backend/.sqlx/query-77f33de1d95e38a44968ea7f026476226c66fe42c9f992bd4c6c232a68ade2bd.json new file mode 100644 index 0000000000000..6d3b24293fcc6 --- /dev/null +++ b/backend/.sqlx/query-77f33de1d95e38a44968ea7f026476226c66fe42c9f992bd4c6c232a68ade2bd.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT hash FROM script WHERE path = 'f/dre_script/script' AND archived = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hash", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "77f33de1d95e38a44968ea7f026476226c66fe42c9f992bd4c6c232a68ade2bd" +} diff --git a/backend/.sqlx/query-526bfaccaafbe2e6f70dd5e6cd21c0c60d4ec155f79d067a8b74cf24eebad88c.json b/backend/.sqlx/query-7bfb3b210d23f2c00a1d6a653e9df5d7df9acf74de6dcc566924de02f1807af2.json similarity index 52% rename from backend/.sqlx/query-526bfaccaafbe2e6f70dd5e6cd21c0c60d4ec155f79d067a8b74cf24eebad88c.json rename to backend/.sqlx/query-7bfb3b210d23f2c00a1d6a653e9df5d7df9acf74de6dcc566924de02f1807af2.json index 08e38953ab78a..19ec28bbe3d6e 100644 --- a/backend/.sqlx/query-526bfaccaafbe2e6f70dd5e6cd21c0c60d4ec155f79d067a8b74cf24eebad88c.json +++ b/backend/.sqlx/query-7bfb3b210d23f2c00a1d6a653e9df5d7df9acf74de6dcc566924de02f1807af2.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "SELECT versions[array_upper(versions, 1)] FROM flow WHERE path = $1 AND workspace_id = $2", + "query": "SELECT id FROM flow_version WHERE path = $1 AND workspace_id = $2 ORDER BY created_at DESC LIMIT 1", "describe": { "columns": [ { "ordinal": 0, - "name": "versions", + "name": "id", "type_info": "Int8" } ], @@ -16,8 +16,8 @@ ] }, "nullable": [ - null + false ] }, - "hash": "526bfaccaafbe2e6f70dd5e6cd21c0c60d4ec155f79d067a8b74cf24eebad88c" + "hash": "7bfb3b210d23f2c00a1d6a653e9df5d7df9acf74de6dcc566924de02f1807af2" } diff --git a/backend/.sqlx/query-7ec724b84479c2f737637e91b8cbed6cae29f361167deee879b8b683ad1bf684.json b/backend/.sqlx/query-7ec724b84479c2f737637e91b8cbed6cae29f361167deee879b8b683ad1bf684.json new file mode 100644 index 0000000000000..a979af6114bed --- /dev/null +++ b/backend/.sqlx/query-7ec724b84479c2f737637e91b8cbed6cae29f361167deee879b8b683ad1bf684.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO debounce_stale_data (job_id, to_relock)\n VALUES ($1, $2)\n ON CONFLICT (job_id)\n DO UPDATE SET to_relock = (\n SELECT array_agg(DISTINCT x)\n FROM unnest(\n -- Combine existing array with new values, removing duplicates\n array_cat(debounce_stale_data.to_relock, EXCLUDED.to_relock)\n ) AS x\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "TextArray" + ] + }, + "nullable": [] + }, + "hash": "7ec724b84479c2f737637e91b8cbed6cae29f361167deee879b8b683ad1bf684" +} diff --git a/backend/.sqlx/query-7ed404f3a8b23f98fb7c15a26b14f9e3e416e1baf20bdb25c9a8ac9efc288931.json b/backend/.sqlx/query-7ed404f3a8b23f98fb7c15a26b14f9e3e416e1baf20bdb25c9a8ac9efc288931.json new file 
mode 100644 index 0000000000000..9216f73a7ee78 --- /dev/null +++ b/backend/.sqlx/query-7ed404f3a8b23f98fb7c15a26b14f9e3e416e1baf20bdb25c9a8ac9efc288931.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO debounce_key (key, job_id) VALUES ($1, $2)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "7ed404f3a8b23f98fb7c15a26b14f9e3e416e1baf20bdb25c9a8ac9efc288931" +} diff --git a/backend/.sqlx/query-91a13927f7e0577e52755fd8463a4fc58818c109deda6678c79474bf45428966.json b/backend/.sqlx/query-91a13927f7e0577e52755fd8463a4fc58818c109deda6678c79474bf45428966.json new file mode 100644 index 0000000000000..d41b7811ee8aa --- /dev/null +++ b/backend/.sqlx/query-91a13927f7e0577e52755fd8463a4fc58818c109deda6678c79474bf45428966.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT running FROM v2_job_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "91a13927f7e0577e52755fd8463a4fc58818c109deda6678c79474bf45428966" +} diff --git a/backend/.sqlx/query-99e11c04bcc436ec7a75f46365423c85a6e6490c2d9e1dc6ac39112d763f0f75.json b/backend/.sqlx/query-99e11c04bcc436ec7a75f46365423c85a6e6490c2d9e1dc6ac39112d763f0f75.json new file mode 100644 index 0000000000000..6fb4205e32392 --- /dev/null +++ b/backend/.sqlx/query-99e11c04bcc436ec7a75f46365423c85a6e6490c2d9e1dc6ac39112d763f0f75.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM app_version WHERE app_id = (SELECT id FROM app WHERE path = $1 AND workspace_id = $2) ORDER BY created_at DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "99e11c04bcc436ec7a75f46365423c85a6e6490c2d9e1dc6ac39112d763f0f75" +} diff --git a/backend/.sqlx/query-9b37ec5aa9b979393c6e5c8d98f467dcabc2c3367fb33592e5608bd985c9e436.json b/backend/.sqlx/query-9b37ec5aa9b979393c6e5c8d98f467dcabc2c3367fb33592e5608bd985c9e436.json new file mode 100644 index 0000000000000..751c25d83924d --- /dev/null +++ b/backend/.sqlx/query-9b37ec5aa9b979393c6e5c8d98f467dcabc2c3367fb33592e5608bd985c9e436.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n dsd.to_relock,\n dk.key\n FROM debounce_key dk\n JOIN debounce_stale_data dsd ON dk.job_id = dsd.job_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "to_relock", + "type_info": "TextArray" + }, + { + "ordinal": 1, + "name": "key", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + false + ] + }, + "hash": "9b37ec5aa9b979393c6e5c8d98f467dcabc2c3367fb33592e5608bd985c9e436" +} diff --git a/backend/.sqlx/query-9d1bd189940d345f27e4850651c423b891b57e9a65a02b52966377c1960ab89e.json b/backend/.sqlx/query-9d1bd189940d345f27e4850651c423b891b57e9a65a02b52966377c1960ab89e.json new file mode 100644 index 0000000000000..5b0998c259a8c --- /dev/null +++ b/backend/.sqlx/query-9d1bd189940d345f27e4850651c423b891b57e9a65a02b52966377c1960ab89e.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT versions[1] FROM flow WHERE path = 'f/dre/flow'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "versions", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": 
"9d1bd189940d345f27e4850651c423b891b57e9a65a02b52966377c1960ab89e" +} diff --git a/backend/.sqlx/query-a14270c6a2af936539c7d7e4a950b45df00a1ae022b3c29730af2347bd76deef.json b/backend/.sqlx/query-a14270c6a2af936539c7d7e4a950b45df00a1ae022b3c29730af2347bd76deef.json new file mode 100644 index 0000000000000..3df9d93ef049d --- /dev/null +++ b/backend/.sqlx/query-a14270c6a2af936539c7d7e4a950b45df00a1ae022b3c29730af2347bd76deef.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM v2_job_completed", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "a14270c6a2af936539c7d7e4a950b45df00a1ae022b3c29730af2347bd76deef" +} diff --git a/backend/.sqlx/query-a206326b6c13c88b773adcd1f7b3e0822cb91bde2324327f1828236112d278b1.json b/backend/.sqlx/query-a206326b6c13c88b773adcd1f7b3e0822cb91bde2324327f1828236112d278b1.json new file mode 100644 index 0000000000000..cdcab1c509e1d --- /dev/null +++ b/backend/.sqlx/query-a206326b6c13c88b773adcd1f7b3e0822cb91bde2324327f1828236112d278b1.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT (scheduled_for - created_at) FROM v2_job_queue", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Interval" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "a206326b6c13c88b773adcd1f7b3e0822cb91bde2324327f1828236112d278b1" +} diff --git a/backend/.sqlx/query-bf252ea52aeb57664ad5054b39998a5e50bc98a8360387bd4158f9bc1289319f.json b/backend/.sqlx/query-bf252ea52aeb57664ad5054b39998a5e50bc98a8360387bd4158f9bc1289319f.json deleted file mode 100644 index 97929c71e34c4..0000000000000 --- a/backend/.sqlx/query-bf252ea52aeb57664ad5054b39998a5e50bc98a8360387bd4158f9bc1289319f.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO flow_version\n (workspace_id, path, value, schema, created_by)\n\n SELECT workspace_id, path, value, schema, created_by\n FROM flow_version WHERE path = $1 AND workspace_id = $2 AND id = $3\n\n RETURNING id", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "bf252ea52aeb57664ad5054b39998a5e50bc98a8360387bd4158f9bc1289319f" -} diff --git a/backend/.sqlx/query-bfc96c870fe0c50ebb9e9bc5ccff231912db86c05c881aff1ffb5460f9711390.json b/backend/.sqlx/query-bfc96c870fe0c50ebb9e9bc5ccff231912db86c05c881aff1ffb5460f9711390.json new file mode 100644 index 0000000000000..bfd7cd16bd42e --- /dev/null +++ b/backend/.sqlx/query-bfc96c870fe0c50ebb9e9bc5ccff231912db86c05c881aff1ffb5460f9711390.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT parent_hashes FROM script WHERE path = 'f/dre_script/script' AND archived = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_hashes", + "type_info": "Int8Array" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true + ] + }, + "hash": "bfc96c870fe0c50ebb9e9bc5ccff231912db86c05c881aff1ffb5460f9711390" +} diff --git a/backend/.sqlx/query-c42326e2121b79d1381a3c91ef90f6929f5dcbf29f3938e40c7f93d98aa7f49c.json b/backend/.sqlx/query-c42326e2121b79d1381a3c91ef90f6929f5dcbf29f3938e40c7f93d98aa7f49c.json new file mode 100644 index 0000000000000..ddc6263aff5f5 --- /dev/null +++ 
b/backend/.sqlx/query-c42326e2121b79d1381a3c91ef90f6929f5dcbf29f3938e40c7f93d98aa7f49c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT runnable_path FROM v2_job", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "runnable_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true + ] + }, + "hash": "c42326e2121b79d1381a3c91ef90f6929f5dcbf29f3938e40c7f93d98aa7f49c" +} diff --git a/backend/.sqlx/query-9f4811fe735d401b62f4b7bf3db2b5cd13eb3364a8a8546007dec7ab528b1f9d.json b/backend/.sqlx/query-c6ef0acdf20bd71dd26de981fb49f178ba8a1b8c1e01e0fec1dfd6a54ea7a894.json similarity index 50% rename from backend/.sqlx/query-9f4811fe735d401b62f4b7bf3db2b5cd13eb3364a8a8546007dec7ab528b1f9d.json rename to backend/.sqlx/query-c6ef0acdf20bd71dd26de981fb49f178ba8a1b8c1e01e0fec1dfd6a54ea7a894.json index 4c9693ce27248..0bd82635daee7 100644 --- a/backend/.sqlx/query-9f4811fe735d401b62f4b7bf3db2b5cd13eb3364a8a8546007dec7ab528b1f9d.json +++ b/backend/.sqlx/query-c6ef0acdf20bd71dd26de981fb49f178ba8a1b8c1e01e0fec1dfd6a54ea7a894.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\nWITH rows_to_delete AS (\n SELECT concurrency_id\n FROM concurrency_counter\n WHERE job_uuids = '{}'::jsonb\n FOR UPDATE SKIP LOCKED\n)\nDELETE FROM concurrency_counter\nWHERE concurrency_id IN (SELECT concurrency_id FROM rows_to_delete) RETURNING concurrency_id", + "query": "\nWITH rows_to_delete AS (\n SELECT concurrency_id\n FROM concurrency_counter\n \n WHERE job_uuids = '{}'::jsonb\n FOR UPDATE SKIP LOCKED\n)\nDELETE FROM concurrency_counter\nWHERE concurrency_id IN (SELECT concurrency_id FROM rows_to_delete) RETURNING concurrency_id", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "9f4811fe735d401b62f4b7bf3db2b5cd13eb3364a8a8546007dec7ab528b1f9d" + "hash": "c6ef0acdf20bd71dd26de981fb49f178ba8a1b8c1e01e0fec1dfd6a54ea7a894" } diff --git a/backend/.sqlx/query-cbc8e3e22862a76e1a8386cca247694378b2be15f3a40e7e6690f31157bdb5af.json b/backend/.sqlx/query-cbc8e3e22862a76e1a8386cca247694378b2be15f3a40e7e6690f31157bdb5af.json new file mode 100644 index 0000000000000..4c4921e2a0c07 --- /dev/null +++ b/backend/.sqlx/query-cbc8e3e22862a76e1a8386cca247694378b2be15f3a40e7e6690f31157bdb5af.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT key FROM debounce_key", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "key", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "cbc8e3e22862a76e1a8386cca247694378b2be15f3a40e7e6690f31157bdb5af" +} diff --git a/backend/.sqlx/query-cfe06702916362aaf5122bb95593eff389e0d44b7a58b69fd5c79629599902fc.json b/backend/.sqlx/query-cfe06702916362aaf5122bb95593eff389e0d44b7a58b69fd5c79629599902fc.json new file mode 100644 index 0000000000000..e9cedd467d747 --- /dev/null +++ b/backend/.sqlx/query-cfe06702916362aaf5122bb95593eff389e0d44b7a58b69fd5c79629599902fc.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM debounce_stale_data WHERE job_id = $1 RETURNING to_relock", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "to_relock", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "cfe06702916362aaf5122bb95593eff389e0d44b7a58b69fd5c79629599902fc" +} diff --git a/backend/.sqlx/query-ca15fe5d43f0e94f50408efe5c9e359770b759e8661687b4503c4b692ecd245e.json 
b/backend/.sqlx/query-d5661c7557cf3a8dee7cf799cd364d21d38edb827d2c08b0ca7d72311b78d574.json similarity index 50% rename from backend/.sqlx/query-ca15fe5d43f0e94f50408efe5c9e359770b759e8661687b4503c4b692ecd245e.json rename to backend/.sqlx/query-d5661c7557cf3a8dee7cf799cd364d21d38edb827d2c08b0ca7d72311b78d574.json index 6812bfdfc7f73..907b140fdd051 100644 --- a/backend/.sqlx/query-ca15fe5d43f0e94f50408efe5c9e359770b759e8661687b4503c4b692ecd245e.json +++ b/backend/.sqlx/query-d5661c7557cf3a8dee7cf799cd364d21d38edb827d2c08b0ca7d72311b78d574.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "SELECT versions[array_upper(versions, 1)] FROM app WHERE path = $1 AND workspace_id = $2", + "query": "SELECT hash FROM script WHERE path = $1 AND workspace_id = $2 AND deleted = false ORDER BY created_at DESC LIMIT 1", "describe": { "columns": [ { "ordinal": 0, - "name": "versions", + "name": "hash", "type_info": "Int8" } ], @@ -16,8 +16,8 @@ ] }, "nullable": [ - null + false ] }, - "hash": "ca15fe5d43f0e94f50408efe5c9e359770b759e8661687b4503c4b692ecd245e" + "hash": "d5661c7557cf3a8dee7cf799cd364d21d38edb827d2c08b0ca7d72311b78d574" } diff --git a/backend/.sqlx/query-d6a060b255f02a2a776a6a3ac4024d3b06170d3a02a06a4c209e49e5ef175916.json b/backend/.sqlx/query-d6a060b255f02a2a776a6a3ac4024d3b06170d3a02a06a4c209e49e5ef175916.json new file mode 100644 index 0000000000000..b69e04115f2d6 --- /dev/null +++ b/backend/.sqlx/query-d6a060b255f02a2a776a6a3ac4024d3b06170d3a02a06a4c209e49e5ef175916.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT lock FROM script WHERE path = 'f/dre_script/script'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "lock", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true + ] + }, + "hash": "d6a060b255f02a2a776a6a3ac4024d3b06170d3a02a06a4c209e49e5ef175916" +} diff --git a/backend/.sqlx/query-db7a756d541dbedbdeb23ab8f914d0ab538043fff345a0438a40708c4066771b.json b/backend/.sqlx/query-db7a756d541dbedbdeb23ab8f914d0ab538043fff345a0438a40708c4066771b.json new file mode 100644 index 0000000000000..99c0aa49ad3f8 --- /dev/null +++ b/backend/.sqlx/query-db7a756d541dbedbdeb23ab8f914d0ab538043fff345a0438a40708c4066771b.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT versions[2] FROM flow WHERE path = 'f/dre/flow'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "versions", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "db7a756d541dbedbdeb23ab8f914d0ab538043fff345a0438a40708c4066771b" +} diff --git a/backend/.sqlx/query-e49b72cf5a05b47f76ebcdc5306928e1be4696120c5d78c1649d5e430a5dae4c.json b/backend/.sqlx/query-e49b72cf5a05b47f76ebcdc5306928e1be4696120c5d78c1649d5e430a5dae4c.json new file mode 100644 index 0000000000000..cd4130b542b01 --- /dev/null +++ b/backend/.sqlx/query-e49b72cf5a05b47f76ebcdc5306928e1be4696120c5d78c1649d5e430a5dae4c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT jsonb_array_elements(value->'modules')->'value'->>'lock' AS lock FROM flow", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "lock", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "e49b72cf5a05b47f76ebcdc5306928e1be4696120c5d78c1649d5e430a5dae4c" +} diff --git a/backend/.sqlx/query-e7bc612ccdbb2532a321ed83e26a0854975ad4c3f6fe24557b87a197485dff39.json b/backend/.sqlx/query-e7bc612ccdbb2532a321ed83e26a0854975ad4c3f6fe24557b87a197485dff39.json 
new file mode 100644 index 0000000000000..8e8a5c98e9745 --- /dev/null +++ b/backend/.sqlx/query-e7bc612ccdbb2532a321ed83e26a0854975ad4c3f6fe24557b87a197485dff39.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_job_queue", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "e7bc612ccdbb2532a321ed83e26a0854975ad4c3f6fe24557b87a197485dff39" +} diff --git a/backend/.sqlx/query-eaeeb4708a6d9bf6be3265dcaaaf97f2301599a39668fcd20e1642fd828e3eec.json b/backend/.sqlx/query-eaeeb4708a6d9bf6be3265dcaaaf97f2301599a39668fcd20e1642fd828e3eec.json new file mode 100644 index 0000000000000..e866bd4001b60 --- /dev/null +++ b/backend/.sqlx/query-eaeeb4708a6d9bf6be3265dcaaaf97f2301599a39668fcd20e1642fd828e3eec.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\nSELECT\n j1.completed_at < j2.started_at\nFROM\n v2_job_completed j1,\n v2_job_completed j2\nWHERE\n j1.id = $1 \n AND j2.id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "eaeeb4708a6d9bf6be3265dcaaaf97f2301599a39668fcd20e1642fd828e3eec" +} diff --git a/backend/.sqlx/query-ee96e97f1a8bd2ac592665ad3e02680159db11c8aeef82056ec30b498f8129da.json b/backend/.sqlx/query-ee96e97f1a8bd2ac592665ad3e02680159db11c8aeef82056ec30b498f8129da.json new file mode 100644 index 0000000000000..0e1f9620e05fd --- /dev/null +++ b/backend/.sqlx/query-ee96e97f1a8bd2ac592665ad3e02680159db11c8aeef82056ec30b498f8129da.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) from v2_job_queue", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "ee96e97f1a8bd2ac592665ad3e02680159db11c8aeef82056ec30b498f8129da" +} diff --git a/backend/.sqlx/query-eff9926fa211497ddbc53866444768ab79733e33bf8f9110a4f4d75a3c07da67.json b/backend/.sqlx/query-eff9926fa211497ddbc53866444768ab79733e33bf8f9110a4f4d75a3c07da67.json new file mode 100644 index 0000000000000..6a48df6e2b3a6 --- /dev/null +++ b/backend/.sqlx/query-eff9926fa211497ddbc53866444768ab79733e33bf8f9110a4f4d75a3c07da67.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM debounce_key", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "eff9926fa211497ddbc53866444768ab79733e33bf8f9110a4f4d75a3c07da67" +} diff --git a/backend/.sqlx/query-f0efa383f2025158de160577ad839ae72faf0c8fe097e6ad6d309aee9a8aede2.json b/backend/.sqlx/query-f0efa383f2025158de160577ad839ae72faf0c8fe097e6ad6d309aee9a8aede2.json new file mode 100644 index 0000000000000..32616f298f851 --- /dev/null +++ b/backend/.sqlx/query-f0efa383f2025158de160577ad839ae72faf0c8fe097e6ad6d309aee9a8aede2.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO flow_version\n (workspace_id, path, value, schema, created_by)\n\n SELECT workspace_id, path, value, schema, created_by\n FROM flow_version WHERE path = $1 AND workspace_id = $2 AND id = $3\n\n RETURNING id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Int8" + ] + 
}, + "nullable": [ + false + ] + }, + "hash": "f0efa383f2025158de160577ad839ae72faf0c8fe097e6ad6d309aee9a8aede2" +} diff --git a/backend/.sqlx/query-fe1539db7384c8edc6d8ec672495fe3964efb8551ad6d74f141fc457034cc5b9.json b/backend/.sqlx/query-fe1539db7384c8edc6d8ec672495fe3964efb8551ad6d74f141fc457034cc5b9.json new file mode 100644 index 0000000000000..7b7e250ef7aff --- /dev/null +++ b/backend/.sqlx/query-fe1539db7384c8edc6d8ec672495fe3964efb8551ad6d74f141fc457034cc5b9.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT job_id FROM debounce_key WHERE key = $1 FOR UPDATE", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fe1539db7384c8edc6d8ec672495fe3964efb8551ad6d74f141fc457034cc5b9" +} diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 855287e63cda4..58ecd617ddce4 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -86,6 +86,7 @@ oauth2 = ["windmill-api/oauth2"] zip = ["windmill-api/zip"] static_frontend = ["windmill-api/static_frontend"] scoped_cache = ["windmill-common/scoped_cache"] +test_job_debouncing = [] # Languages python = ["windmill-worker/python", "windmill-api/python"] rust = ["windmill-worker/rust"] diff --git a/backend/ee-repo-ref.txt b/backend/ee-repo-ref.txt index 85cd80d864048..bf77fc73122b0 100644 --- a/backend/ee-repo-ref.txt +++ b/backend/ee-repo-ref.txt @@ -1 +1 @@ -656c0418f9959154738e3583ade5e2a80c4a4daa \ No newline at end of file +365a3c9dd7a3fddc3280bbda86ba93ef149d1b64 diff --git a/backend/migrations/20250925142554_job_debouncing.down.sql b/backend/migrations/20250925142554_job_debouncing.down.sql new file mode 100644 index 0000000000000..fd1c155f43da5 --- /dev/null +++ b/backend/migrations/20250925142554_job_debouncing.down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS debounce_key; +DROP TABLE IF EXISTS debounce_stale_data; +DROP TABLE IF EXISTS debounce_obj_latest_version; diff --git a/backend/migrations/20250925142554_job_debouncing.up.sql b/backend/migrations/20250925142554_job_debouncing.up.sql new file mode 100644 index 0000000000000..0c946aaaf5426 --- /dev/null +++ b/backend/migrations/20250925142554_job_debouncing.up.sql @@ -0,0 +1,19 @@ +CREATE TABLE debounce_key ( + key VARCHAR(255) NOT NULL, + job_id uuid NOT NULL, + PRIMARY KEY (key) +); + +CREATE TABLE debounce_stale_data ( + job_id uuid NOT NULL, + to_relock TEXT[], + PRIMARY KEY (job_id) +); + +-- TODO: Prune on move/deletion +-- But normally this will persist across runs. 
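+--
+-- Illustrative sketch (commented out, not executed by this migration) of how
+-- these tables are exercised, assembled from the queries this PR adds to the
+-- sqlx cache:
+--
+--   -- a dependency job claims its debounce key; a concurrent push for the
+--   -- same key can then find the in-flight job and merge into it
+--   INSERT INTO debounce_key (key, job_id) VALUES ($1, $2);
+--   SELECT job_id FROM debounce_key WHERE key = $1 FOR UPDATE;
+--
+--   -- stale paths to relock accumulate per job, deduplicated on conflict
+--   INSERT INTO debounce_stale_data (job_id, to_relock) VALUES ($1, $2)
+--   ON CONFLICT (job_id) DO UPDATE SET to_relock = (
+--     SELECT array_agg(DISTINCT x)
+--     FROM unnest(array_cat(debounce_stale_data.to_relock, EXCLUDED.to_relock)) AS x
+--   );
+--
+--   -- when the debounced job finally runs, both records are consumed
+--   DELETE FROM debounce_stale_data WHERE job_id = $1 RETURNING to_relock;
+--   DELETE FROM debounce_key WHERE key = $1;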
+-- CREATE TABLE unlocked_script_latest_version ( +-- key VARCHAR(255) NOT NULL, +-- version BIGINT NOT NULL, +-- PRIMARY KEY (key) +-- ); diff --git a/backend/src/monitor.rs b/backend/src/monitor.rs index e4d3c380af47d..3e6ddec903251 100644 --- a/backend/src/monitor.rs +++ b/backend/src/monitor.rs @@ -1588,6 +1588,7 @@ pub async fn monitor_db( } } }; + // run every hour (3600 s / 30 s monitor period = 120 iterations) let cleanup_worker_group_stats_f = async { if server_mode && iteration.is_some() && iteration.as_ref().unwrap().should_run(120) { @@ -2415,6 +2416,7 @@ async fn cleanup_concurrency_counters_empty_keys(db: &DB) -> error::Result<()> { WITH rows_to_delete AS ( SELECT concurrency_id FROM concurrency_counter + WHERE job_uuids = '{}'::jsonb FOR UPDATE SKIP LOCKED ) diff --git a/backend/tests/common/mod.rs b/backend/tests/common/mod.rs index 026eee12f717a..73db6d1f621c0 100644 --- a/backend/tests/common/mod.rs +++ b/backend/tests/common/mod.rs @@ -20,6 +20,17 @@ use windmill_common::{ }; use windmill_queue::PushIsolationLevel; +pub async fn init_client(db: Pool<Postgres>) -> (windmill_api_client::Client, u16, ApiServer) { + initialize_tracing().await; + let server = ApiServer::start(db).await.unwrap(); + let port = server.addr.port(); + let client = windmill_api_client::create_client( + &format!("http://localhost:{port}"), + "SECRET_TOKEN".to_string(), + ); + (client, port, server) +} + /// it's important this is unique between tests as there is one prometheus registry and /// run_worker shouldn't register the same metric with the same worker name more than once. /// @@ -131,7 +142,7 @@ impl RunJob { let tx = PushIsolationLevel::IsolatedRoot(db.clone()); let (uuid, tx) = windmill_queue::push( - &db, + db, tx, "test-workspace", payload, @@ -157,6 +168,7 @@ impl RunJob { None, false, None, + None, ) .await .expect("push has to succeed"); @@ -170,8 +182,8 @@ impl RunJob { let uuid = self.push(db).await; let listener = listen_for_completed_jobs(db).await; in_test_worker(db, listener.find(&uuid), port).await; - let r = completed_job(uuid, db).await; - r + + completed_job(uuid, db).await } /// push the job, spawn a worker, wait until the job is in completed_job @@ -185,8 +197,8 @@ impl RunJob { let listener = listen_for_completed_jobs(db).await; test(uuid).await; in_test_worker(db, listener.find(&uuid), port).await; - let r = completed_job(uuid, db).await; - r + + completed_job(uuid, db).await } } @@ -251,10 +263,10 @@ pub fn spawn_test_worker( let base_internal_url = format!("http://localhost:{}", port); { let mut wc = WORKER_CONFIG.write().await; - (*wc).worker_tags = windmill_common::worker::DEFAULT_TAGS.clone(); - (*wc).priority_tags_sorted = vec![windmill_common::worker::PriorityTags { + wc.worker_tags = windmill_common::worker::DEFAULT_TAGS.clone(); + wc.priority_tags_sorted = vec![windmill_common::worker::PriorityTags { priority: 0, - tags: (*wc).worker_tags.clone(), + tags: wc.worker_tags.clone(), }]; windmill_common::worker::store_suspended_pull_query(&wc).await; windmill_common::worker::store_pull_query(&wc).await; @@ -349,7 +361,7 @@ fn find_module_in_vec(modules: Vec<FlowModule>, id: &str) -> Option<FlowModule> -pub async fn set_jwt_secret() -> () { +pub async fn set_jwt_secret() { let secret = "mytestsecret".to_string(); let mut l = JWT_SECRET.write().await; *l = secret; @@ -475,10 +487,10 @@ pub async fn assert_lockfile( .await .unwrap(); - let mut completed = listen_for_completed_jobs(&db).await; + let mut completed = listen_for_completed_jobs(db).await; let db2 = db.clone(); in_test_worker( - &db, + db, async move { completed.next().await; // deployed
script @@ -571,10 +583,10 @@ pub async fn run_deployed_relative_imports( .await .unwrap(); - let mut completed = listen_for_completed_jobs(&db).await; + let mut completed = listen_for_completed_jobs(db).await; let db2 = db.clone(); in_test_worker( - &db, + db, async move { completed.next().await; // deployed script @@ -631,10 +643,10 @@ pub async fn run_preview_relative_imports( let server = ApiServer::start(db.clone()).await?; let port = server.addr.port(); - let mut completed = listen_for_completed_jobs(&db).await; + let mut completed = listen_for_completed_jobs(db).await; let db2 = db.clone(); in_test_worker( - &db, + db, async move { let job = RunJob::from(JobPayload::Code(RawCode { hash: None, @@ -671,3 +683,21 @@ pub async fn run_preview_relative_imports( Ok(()) } + +/// IMPORTANT: +/// Do not run this in parallel in tests! +/// +/// No two tests may call this at the same time; doing so will result in conflicts! +pub async fn rebuild_dmap(client: &windmill_api_client::Client) -> bool { + client + .client() + .post(format!( + "{}/w/test-workspace/workspaces/rebuild_dependency_map", + client.baseurl() + )) + .send() + .await + .unwrap() + .status() + .is_success() +} diff --git a/backend/tests/fixtures/djob_debouncing.sql b/backend/tests/fixtures/djob_debouncing.sql new file mode 100644 index 0000000000000..067d46c398fa6 --- /dev/null +++ b/backend/tests/fixtures/djob_debouncing.sql @@ -0,0 +1,242 @@ +-- FLOWS -- +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +'#requirements: +#bottle==0.13.2 +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre/leaf_left', 333400, 'python3', ''); +-- Padded Hex: 0000000000051658 + +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +'#requirements: +#tiny==0.1.3 +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre/leaf_right', 333403, 'python3', ''); +-- Padded Hex: 000000000005165B + + +INSERT INTO public.flow(workspace_id, summary, description, path, versions, schema, value, edited_by) VALUES ( +'test-workspace', +'', +'', +'f/dre/flow', +'{1443253234253454}', +'{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "properties": {}, + "required": [], + "type": "object" +}', +$tag$ +{ + "modules": [ + { + "id": "a", + "value": { + "lock": "# py: 3.11\n", + "type": "rawscript", + "assets": [], + "content": "import f.dre.leaf_left\n\ndef main():\n pass", + "language": "python3", + "input_transforms": {} + }, + "summary": "leaf Left" + }, + { + "id": "b", + "value": { + "lock": "# py: 3.11\n", + "type": "rawscript", + "assets": [], + "content": "import f.dre.leaf_right\nimport f.dre.leaf_left\n\ndef main():\n pass", + "language": "python3", + "input_transforms": {} + }, + "summary": "leaf Left and Right" + }, + { + "id": "c", + "value": { + "lock": "# py: 3.11\n", + "type": "rawscript", + "assets": [], + "content": "import f.dre.leaf_right\n\ndef main():\n pass", + "language": "python3", + "input_transforms": {} + }, + "summary": "leaf Right" + } + ] +}$tag$, +'system' +); + +INSERT INTO public.flow_version(id, workspace_id, path, schema, value, created_by) VALUES ( +1443253234253454, +'test-workspace', +'f/dre/flow', +'{ + "$schema":
"https://json-schema.org/draft/2020-12/schema", + "properties": {}, + "required": [], + "type": "object" +}', +$tag$ +{ + "modules": [ + { + "id": "a", + "value": { + "lock": "# py: 3.11\n", + "type": "rawscript", + "assets": [], + "content": "import f.dre.leaf_left\n\ndef main():\n pass", + "language": "python3", + "input_transforms": {} + }, + "summary": "leaf Left" + }, + { + "id": "b", + "value": { + "lock": "# py: 3.11\n", + "type": "rawscript", + "assets": [], + "content": "import f.dre.leaf_right\nimport f.dre.leaf_left\n\ndef main():\n pass", + "language": "python3", + "input_transforms": {} + }, + "summary": "leaf Left and Right" + }, + { + "id": "c", + "value": { + "lock": "# py: 3.11\n", + "type": "rawscript", + "assets": [], + "content": "import f.dre.leaf_right\n\ndef main():\n pass", + "language": "python3", + "input_transforms": {} + }, + "summary": "leaf Right" + } + ] +}$tag$, +'system' +); + +-- APPS -- +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +'#requirements: +#bottle==0.13.2 +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre_app/leaf_left', 433400, 'python3', ''); +-- Padded Hex: 0000000000069CF8 + +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +'#requirements: +#tiny==0.1.3 +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre_app/leaf_right', 433403, 'python3', ''); +-- Padded Hex: 0000000000069CFB + +INSERT INTO public.app(id, workspace_id, path, versions, policy) VALUES ( +2, +'test-workspace', +'f/dre_app/app', +'{0}', +'{}' +); + +INSERT INTO public.app_version(id, app_id, value, created_by) VALUES ( +0, +2,
+$tag${"grid":[{"3":{"fixed":true,"x":0,"y":0,"fullHeight":false,"w":6,"h":2},"12":{"fixed":true,"x":0,"y":0,"fullHeight":false,"w":12,"h":2},"data":{"type":"containercomponent","configuration":{},"customCss":{"container":{"class":"!p-0","style":""}},"numberOfSubgrids":1,"id":"topbar"},"id":"topbar"},{"3":{"fixed":false,"x":0,"y":2,"fullHeight":false,"w":1,"h":1},"12":{"fixed":false,"x":0,"y":2,"fullHeight":false,"w":2,"h":1},"data":{"type":"buttoncomponent","configuration":{"label":{"type":"static","value":"A"},"color":{"type":"static","value":"blue"},"size":{"type":"static","value":"xs"},"fillContainer":{"type":"static","value":false},"disabled":{"type":"static","value":false},"beforeIcon":{"type":"static"},"afterIcon":{"type":"static"},"tooltip":{"type":"static","value":""},"triggerOnAppLoad":{"type":"static","value":false},"runInBackground":{"type":"static","value":false},"onSuccess":{"type":"oneOf","selected":"none","configuration":{"none":{},"gotoUrl":{"url":{"type":"static","value":""},"newTab":{"type":"static","value":true}},"setTab":{"setTab":{"type":"static","value":[]}},"sendToast":{"message":{"type":"static","value":""}},"openModal":{"modalId":{"type":"static","value":""}},"closeModal":{"modalId":{"type":"static","value":""}},"open":{"id":{"type":"static","value":""}},"close":{"id":{"type":"static","value":""}},"clearFiles":{"id":{"type":"static","value":""}}}},"onError":{"type":"oneOf","selected":"errorOverlay","configuration":{"errorOverlay":{},"gotoUrl":{"url":{"type":"static","value":""},"newTab":{"type":"static","value":true}},"setTab":{"setTab":{"type":"static","value":[]}},"sendErrorToast":{"message":{"type":"static","value":"An error occurred"},"appendError":{"type":"static","value":true}},"open":{"id":{"type":"static","value":""}},"close":{"id":{"type":"static","value":""}}}},"confirmationModal":{"type":"oneOf","selected":"none","configuration":{"none":{},"confirmationModal":{"title":{"type":"static","value":"Title"},"description":{"type":"static","value":"Are you sure?"},"confirmationText":{"type":"static","value":"Confirm"}}}}},"componentInput":{"type":"runnable","fieldType":"any","fields":{},"runnable":{"type":"runnableByName","name":"Inline Script","inlineScript":{"content":"import f.dre_app.leaf_left\n\ndef main():\n 
pass\n","language":"python3","schema":{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"},"path":"f/dre_app/app/Inline_Script"}},"autoRefresh":false,"recomputeOnInputChanged":false},"customCss":{"button":{"style":"","class":""},"container":{"style":"","class":""}},"recomputeIds":[],"horizontalAlignment":"center","verticalAlignment":"center","id":"a"},"id":"a"},{"3":{"fixed":false,"x":1,"y":2,"fullHeight":false,"w":1,"h":1},"12":{"fixed":false,"x":2,"y":2,"fullHeight":false,"w":2,"h":1},"data":{"type":"buttoncomponent","configuration":{"label":{"type":"static","value":"B"},"color":{"type":"static","value":"blue"},"size":{"type":"static","value":"xs"},"fillContainer":{"type":"static","value":false},"disabled":{"type":"static","value":false},"beforeIcon":{"type":"static"},"afterIcon":{"type":"static"},"tooltip":{"type":"static","value":""},"triggerOnAppLoad":{"type":"static","value":false},"runInBackground":{"type":"static","value":false},"onSuccess":{"type":"oneOf","selected":"none","configuration":{"none":{},"gotoUrl":{"url":{"type":"static","value":""},"newTab":{"type":"static","value":true}},"setTab":{"setTab":{"type":"static","value":[]}},"sendToast":{"message":{"type":"static","value":""}},"openModal":{"modalId":{"type":"static","value":""}},"closeModal":{"modalId":{"type":"static","value":""}},"open":{"id":{"type":"static","value":""}},"close":{"id":{"type":"static","value":""}},"clearFiles":{"id":{"type":"static","value":""}}}},"onError":{"type":"oneOf","selected":"errorOverlay","configuration":{"errorOverlay":{},"gotoUrl":{"url":{"type":"static","value":""},"newTab":{"type":"static","value":true}},"setTab":{"setTab":{"type":"static","value":[]}},"sendErrorToast":{"message":{"type":"static","value":"An error occurred"},"appendError":{"type":"static","value":true}},"open":{"id":{"type":"static","value":""}},"close":{"id":{"type":"static","value":""}}}},"confirmationModal":{"type":"oneOf","selected":"none","configuration":{"none":{},"confirmationModal":{"title":{"type":"static","value":"Title"},"description":{"type":"static","value":"Are you sure?"},"confirmationText":{"type":"static","value":"Confirm"}}}}},"componentInput":{"type":"runnable","fieldType":"any","fields":{},"runnable":{"type":"runnableByName","name":"Inline Script","inlineScript":{"content":"import f.dre_app.leaf_left\nimport f.dre_app.leaf_right\n\ndef main():\n 
pass\n","language":"python3","schema":{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"},"path":"f/dre_app/app/Inline_Script"}},"autoRefresh":false,"recomputeOnInputChanged":false},"customCss":{"button":{"style":"","class":""},"container":{"style":"","class":""}},"recomputeIds":[],"horizontalAlignment":"center","verticalAlignment":"center","id":"b"},"id":"b"},{"3":{"fixed":false,"x":2,"y":2,"fullHeight":false,"w":1,"h":1},"12":{"fixed":false,"x":4,"y":2,"fullHeight":false,"w":2,"h":1},"data":{"type":"buttoncomponent","configuration":{"label":{"type":"static","value":"C"},"color":{"type":"static","value":"blue"},"size":{"type":"static","value":"xs"},"fillContainer":{"type":"static","value":false},"disabled":{"type":"static","value":false},"beforeIcon":{"type":"static"},"afterIcon":{"type":"static"},"tooltip":{"type":"static","value":""},"triggerOnAppLoad":{"type":"static","value":false},"runInBackground":{"type":"static","value":false},"onSuccess":{"type":"oneOf","selected":"none","configuration":{"none":{},"gotoUrl":{"url":{"type":"static","value":""},"newTab":{"type":"static","value":true}},"setTab":{"setTab":{"type":"static","value":[]}},"sendToast":{"message":{"type":"static","value":""}},"openModal":{"modalId":{"type":"static","value":""}},"closeModal":{"modalId":{"type":"static","value":""}},"open":{"id":{"type":"static","value":""}},"close":{"id":{"type":"static","value":""}},"clearFiles":{"id":{"type":"static","value":""}}}},"onError":{"type":"oneOf","selected":"errorOverlay","configuration":{"errorOverlay":{},"gotoUrl":{"url":{"type":"static","value":""},"newTab":{"type":"static","value":true}},"setTab":{"setTab":{"type":"static","value":[]}},"sendErrorToast":{"message":{"type":"static","value":"An error occurred"},"appendError":{"type":"static","value":true}},"open":{"id":{"type":"static","value":""}},"close":{"id":{"type":"static","value":""}}}},"confirmationModal":{"type":"oneOf","selected":"none","configuration":{"none":{},"confirmationModal":{"title":{"type":"static","value":"Title"},"description":{"type":"static","value":"Are you sure?"},"confirmationText":{"type":"static","value":"Confirm"}}}}},"componentInput":{"type":"runnable","fieldType":"any","fields":{},"runnable":{"type":"runnableByName","name":"Inline Script","inlineScript":{"content":"import f.dre_app.leaf_right\n\ndef main():\n pass\n","language":"python3","schema":{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"},"path":"f/dre_app/app/Inline_Script"}},"autoRefresh":false,"recomputeOnInputChanged":false},"customCss":{"button":{"style":"","class":""},"container":{"style":"","class":""}},"recomputeIds":[],"horizontalAlignment":"center","verticalAlignment":"center","id":"c"},"id":"c"}],"fullscreen":false,"unusedInlineScripts":[],"hiddenInlineScripts":[],"theme":{"type":"path","path":"f/app_themes/theme_0"},"subgrids":{"topbar-0":[{"3":{"fixed":false,"x":0,"y":0,"fullHeight":false,"w":6,"h":1},"12":{"fixed":false,"x":0,"y":0,"fullHeight":false,"w":6,"h":1},"data":{"type":"textcomponent","configuration":{"style":{"type":"static","value":"Body"},"copyButton":{"type":"static","value":false},"tooltip":{"type":"evalv2","value":"","fieldType":"text","expr":"`Author: 
${ctx.author}`","connections":[{"componentId":"ctx","id":"author"}]},"disableNoText":{"type":"static","value":true,"fieldType":"boolean"}},"componentInput":{"type":"templatev2","fieldType":"template","eval":"${ctx.summary}","connections":[{"id":"summary","componentId":"ctx"}]},"customCss":{"text":{"class":"text-xl font-semibold whitespace-nowrap truncate","style":""},"container":{"class":"","style":""}},"horizontalAlignment":"left","verticalAlignment":"center","id":"title"},"id":"title"},{"3":{"fixed":false,"x":0,"y":1,"fullHeight":false,"w":3,"h":1},"12":{"fixed":false,"x":6,"y":0,"fullHeight":false,"w":6,"h":1},"data":{"type":"recomputeallcomponent","configuration":{"defaultRefreshInterval":{"type":"static","value":"0"}},"customCss":{"container":{"style":"","class":""}},"menuItems":[],"horizontalAlignment":"right","verticalAlignment":"center","id":"recomputeall"},"id":"recomputeall"}]},"hideLegacyTopBar":true,"mobileViewOnSmallerScreens":false}$tag$, +'system' +); + +-- SCRIPTS -- +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +'#requirements: +#bottle==0.13.2 +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre_script/leaf_left', 533400, 'python3', ''); +-- Padded Hex: 0000000000082398 + +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +'#requirements: +#tiny==0.1.3 +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre_script/leaf_right', 533403, 'python3', ''); +-- Padded Hex: 000000000008239B + +INSERT INTO public.script(workspace_id, created_by, content, schema, summary, description, path, hash, language, lock) VALUES ( +'test-workspace', +'test-user', +' +import f.dre_script.leaf_left +import f.dre_script.leaf_right + +def main(): + pass +', +'{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"required":[],"type":"object"}', +'', +'', +'f/dre_script/script', 533404, 'python3', ''); +-- Padded Hex: 000000000008239C + +-- Create dependency map +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre/leaf_left', 'flow', 'f/dre/flow', 'a'); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre/leaf_left', 'flow', 'f/dre/flow', 'b'); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre/leaf_right', 'flow', 'f/dre/flow', 'b'); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre/leaf_right', 'flow', 'f/dre/flow', 'c'); + +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre_app/leaf_left', 'app', 'f/dre_app/app', 'a'); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre_app/leaf_left', 'app', 'f/dre_app/app', 'b'); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 
'f/dre_app/leaf_right', 'app', 'f/dre_app/app', 'b'); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre_app/leaf_right', 'app', 'f/dre_app/app', 'c'); + +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre_script/leaf_left', 'script', 'f/dre_script/script', ''); +INSERT INTO dependency_map (workspace_id, imported_path, importer_kind, importer_path, importer_node_id) VALUES ('test-workspace', 'f/dre_script/leaf_right', 'script', 'f/dre_script/script', ''); diff --git a/backend/tests/job_payload.rs b/backend/tests/job_payload.rs index d01064d4bc439..ace3e61ef00d2 100644 --- a/backend/tests/job_payload.rs +++ b/backend/tests/job_payload.rs @@ -10,6 +10,7 @@ mod job_payload { use windmill_common::jobs::JobPayload; use windmill_common::flows::{FlowValue, FlowModule, FlowModuleValue}; use windmill_common::flow_status::RestartedFrom; + use windmill_common::worker::{ MIN_VERSION_IS_AT_LEAST_1_427, MIN_VERSION_IS_AT_LEAST_1_432, MIN_VERSION_IS_AT_LEAST_1_440, }; diff --git a/backend/tests/relative_imports.rs b/backend/tests/relative_imports.rs index 91af80918b6cf..b088241f5ac78 100644 --- a/backend/tests/relative_imports.rs +++ b/backend/tests/relative_imports.rs @@ -1,48 +1,57 @@ // TODO: move all related logic here (if anything left anywhere in codebase) mod common; +use windmill_api_client::types::NewScript; + +fn quick_ns( + content: &str, + language: windmill_api_client::types::ScriptLang, + path: &str, + lock: Option, + parent_hash: Option, +) -> NewScript { + NewScript { + content: content.into(), + language, + lock, + parent_hash, + path: path.into(), + concurrent_limit: None, + concurrency_time_window_s: None, + cache_ttl: None, + dedicated_worker: None, + description: "".to_string(), + draft_only: None, + envs: vec![], + is_template: None, + kind: None, + summary: "".to_string(), + tag: None, + schema: std::collections::HashMap::new(), + ws_error_handler_muted: Some(false), + priority: None, + delete_after_use: None, + timeout: None, + restart_unless_cancelled: None, + deployment_message: None, + concurrency_key: None, + visible_to_runner_only: None, + no_main_func: None, + codebase: None, + has_preprocessor: None, + on_behalf_of_email: None, + assets: vec![], + } +} + mod dependency_map { + use super::quick_ns; use sqlx::{Pool, Postgres}; use tokio_stream::StreamExt; - use windmill_api_client::types::NewScript; - use crate::common::{in_test_worker, listen_for_completed_jobs, ApiServer}; - - pub async fn initialize_tracing() { - use std::sync::Once; - - static ONCE: Once = Once::new(); - ONCE.call_once(|| { - let _ = windmill_common::tracing_init::initialize_tracing( - "test", - &windmill_common::utils::Mode::Standalone, - "test", - ); - }); - } - - async fn rebuild_dmap(client: &windmill_api_client::Client) -> bool { - client - .client() - .post(format!( - "{}/w/test-workspace/workspaces/rebuild_dependency_map", - client.baseurl() - )) - .send() - .await - .unwrap() - .status() - .is_success() - } + use crate::common::{in_test_worker, init_client, listen_for_completed_jobs, ApiServer}; async fn init(db: Pool) -> (windmill_api_client::Client, u16, ApiServer) { - initialize_tracing().await; - let server = ApiServer::start(db).await.unwrap(); - let port = server.addr.port(); - let client = windmill_api_client::create_client( - &format!("http://localhost:{port}"), - "SECRET_TOKEN".to_string(), - ); - (client, port, 
server) + init_client(db).await } async fn _clear_dmap(db: &Pool) { @@ -108,47 +117,6 @@ mod dependency_map { ); } - fn quick_ns( - content: &str, - language: windmill_api_client::types::ScriptLang, - path: &str, - lock: Option, - parent_hash: Option, - ) -> NewScript { - NewScript { - content: content.into(), - language, - lock, - parent_hash, - path: path.into(), - concurrent_limit: None, - concurrency_time_window_s: None, - cache_ttl: None, - dedicated_worker: None, - description: "".to_string(), - draft_only: None, - envs: vec![], - is_template: None, - kind: None, - summary: "".to_string(), - tag: None, - schema: std::collections::HashMap::new(), - ws_error_handler_muted: Some(false), - priority: None, - delete_after_use: None, - timeout: None, - restart_unless_cancelled: None, - deployment_message: None, - concurrency_key: None, - visible_to_runner_only: None, - no_main_func: None, - codebase: None, - has_preprocessor: None, - on_behalf_of_email: None, - assets: vec![], - } - } - lazy_static::lazy_static! { pub static ref CORRECT_DMAP: Vec<(&'static str, &'static str, &'static str, &'static str)> = vec![ ("f/rel/branch", "script", "f/rel/leaf_1", ""), @@ -170,13 +138,16 @@ mod dependency_map { ("f/rel/root_app", "app", "f/rel/branch", "youcanpressme")]; } + // TODO: + // Test that checks that we can run rebuild_dmap multiple times in tests. + #[cfg(feature = "python")] #[sqlx::test(fixtures("base", "dependency_map"))] async fn relative_imports_test_rebuild_correctness(db: Pool) -> anyhow::Result<()> { let (client, _port, _s) = init(db.clone()).await; assert_dmap(&db, None, CORRECT_DMAP.clone()).await; // rebuild map - assert!(rebuild_dmap(&client).await); + assert!(super::common::rebuild_dmap(&client).await); assert_dmap(&db, None, CORRECT_DMAP.clone()).await; Ok(()) } @@ -189,7 +160,7 @@ mod dependency_map { // Spawn first rebuild let handle = { let client = client.clone(); - tokio::spawn(async move { rebuild_dmap(&client).await }) + tokio::spawn(async move { super::common::rebuild_dmap(&client).await }) }; // Immidiately spawn another @@ -235,7 +206,7 @@ def main(): ", windmill_api_client::types::ScriptLang::Python3, "f/rel/root_script", - Some(format!("# from requirements.txt")), + Some("# from requirements.txt".to_string()), Some("000000000005165B".into()), ), ) @@ -287,14 +258,14 @@ def main(): windmill_api_client::types::ScriptLang::Python3, "f/rel/root_script", // We still want to pass lock to it. 
- Some(format!("# py311")), + Some("# py311".to_string()), Some("000000000005165B".into()), ), ) .await .unwrap(); assert_dmap(&db, None, CORRECT_DMAP.clone()).await; - tokio::time::sleep(std::time::Duration::from_secs(13)).await; + // tokio::time::sleep(std::time::Duration::from_secs(13)).await; assert_dmap(&db, None, CORRECT_DMAP.clone()).await; Ok(()) } @@ -547,3 +518,1847 @@ def main(): Ok(()) } } + +#[cfg(feature = "test_job_debouncing")] +mod job_debouncing { + async fn trigger_djob_for( + client: &windmill_api_client::Client, + path: &str, + parent_hash: &str, + content: Option, + ) { + use super::quick_ns; + use windmill_api_client::types::ScriptLang; + client + .create_script( + "test-workspace", + &quick_ns( + &content.unwrap_or( + " +def main(): + pass + " + .into(), + ), + ScriptLang::Python3, + path, + None, + Some(parent_hash.into()), + ), + ) + .await + .unwrap(); + } + // TODO: test workspaces specific things, + + /// # Double referenced even + /// It follows this topology: + /// + /// ┌─FLOW──────────┐ + /// │┌───┐┌───┐┌───┐│ + /// ││ A ││ B ││ C ││ + /// │└─▲─┘▲───▲└─▲─┘│ + /// └──┼──┼───┼──┼──┘ + /// ┌┴──┴┐ ┌┴──┴┐ + /// │L_LF│ │R_LF│ + /// └────┘ └────┘ + /// + /// p.s: "LF" stands for "Leaf", "L" - "Left", "R" - "Right" + mod flows { + use crate::common::{in_test_worker, init_client, listen_for_completed_jobs}; + use crate::job_debouncing::trigger_djob_for; + use std::time::Duration; + use tokio::time::sleep; + use tokio_stream::StreamExt; + + /// 1. LLF and RLF create two djobs for flow at the same and fall into single debounce + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_1(db: sqlx::Pool) -> anyhow::Result<()> { + // This tests if debouncing and consolidation works. + // Also makes sures that dependency job does not create new flow version + + let (client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // Verify locks are empty + { + assert_eq!( + sqlx::query_scalar!("SELECT jsonb_array_elements(value->'modules')->'value'->>'lock' AS lock FROM flow") + .fetch_all(&db) + .await + .unwrap(), + vec![ + Some("# py: 3.11\n".into()), + Some("# py: 3.11\n".into()), + Some("# py: 3.11\n".into()) + ] + ); + } + + // Trigger both at the same time. + { + trigger_djob_for( + &client, + "f/dre/leaf_left", + "0000000000051658", + Some("#requirements:\n#bottle==0.13.2\ndef main():\npass".into()), + ) + .await; + + trigger_djob_for( + &client, + "f/dre/leaf_right", + "000000000005165B", + Some("#requirements:\n#tiny==0.1.3\ndef main():\npass".into()), + ) + .await; + } + + in_test_worker( + &db, + async { + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre/leaf_left" + ); + + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre/leaf_right" + ); + + // Let jobs propagate + sleep(Duration::from_secs(2)).await; + + // Verify there is only one queued job that is scheduled for atleast 3s ahead. 
+ { + let q = sqlx::query_scalar!( + "SELECT (scheduled_for - created_at) FROM v2_job_queue" + ) + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(1, q.len()); + assert!(dbg!(q[0].unwrap().microseconds) > 1_000_000 /* 1 second */); + } + + // Verify debounce_stale_data and debounce_key + { + let q = sqlx::query!( + "SELECT + dsd.to_relock, + dk.key + FROM debounce_key dk + JOIN debounce_stale_data dsd ON dk.job_id = dsd.job_id" + ) + .fetch_all(&db) + .await + .unwrap(); + + // Should be single entry + assert!(q.len() == 1); + + // This verifies that all nodes_to_relock are consolidated correctly + // AND there are no duplicates + assert_eq!( + q[0].to_relock.clone().unwrap(), + vec!["a".to_owned(), "b".to_owned(), "c".to_owned()] + ); + + // Should be workspace specific and these specific tests cover only dependency job debouncing + assert_eq!( + q[0].key.clone(), + "test-workspace:f/dre/flow:dependency".to_owned(), + ); + } + + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre/flow" + ); + }, + port, + ) + .await; + + // Verify latest flow.version property + { + // The latest flow version should not be the initial one + assert_eq!( + 1, // Automatically assigned + dbg!(sqlx::query_scalar!( + "SELECT versions[2] FROM flow WHERE path = 'f/dre/flow'" + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap()) + ); + + // Only the first element should be our initial version (Postgres arrays are 1-indexed) + assert_eq!( + 1443253234253454, // < Predefined in fixture + dbg!(sqlx::query_scalar!( + "SELECT versions[1] FROM flow WHERE path = 'f/dre/flow'" + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap()) + ); + } + + // Verify that there are only two versions of the flow in the global flow_version table + { + assert_eq!( + 2, + sqlx::query_scalar!( + "SELECT COUNT(*) FROM flow_version WHERE path = 'f/dre/flow'" + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + } + + // Verify locks + { + assert_eq!( + sqlx::query_scalar!("SELECT jsonb_array_elements(value->'modules')->'value'->>'lock' AS lock FROM flow") + .fetch_all(&db) + .await + .unwrap(), + vec![ + Some("# py: 3.11\nbottle==0.13.2".into()), + Some("# py: 3.11\nbottle==0.13.2\ntiny==0.1.3".into()), + Some("# py: 3.11\ntiny==0.1.3".into()) + ] + ); + } + + // TODO: + // tracing_assertions::assert_has_events!([info("This is supposed to be called")]); + // 2025-10-06T14:31:10.832469Z WARN windmill-worker/src/worker.rs:1593: pull took more than 0.1s (0.222477345) this is a sign that the database is undersized for this load. empty: true, err: true worker=wk-default-nixos-EzDEL hostname=nixos + + // Verify cleanup + { + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_key") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_stale_data") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + } + + Ok(()) + } + + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_left(db: sqlx::Pool<sqlx::Postgres>) -> anyhow::Result<()> { + use crate::common::RunJob; + + // TODO: We don't care about timer. If there is no timer, it will be set automatically for djobs?? + let (_client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // Trigger both at the same time.
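+        // (Note on the args below, stated as assumptions documented by these tests:
+        // `dbg_djob_sleep` is a debug-only knob that makes the djob sleep for the
+        // given number of seconds while it holds its debounce slot, and
+        // `triggered_by_relative_import` marks the push as coming from the
+        // dependents-recompute path rather than a direct deploy.)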
+ { + let mut args = std::collections::HashMap::new(); + args.insert( + "dbg_djob_sleep".to_owned(), + // Execution should take this seconds + windmill_common::worker::to_raw_value(&20), + ); + + args.insert( + "triggered_by_relative_import".to_owned(), + // Execution should take this seconds + windmill_common::worker::to_raw_value(&()), + ); + + let (_flow_id, new_tx) = windmill_queue::push( + &db, + windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + "test-workspace", + windmill_common::jobs::JobPayload::FlowDependencies { + path: "f/dre/flow".to_owned(), + dedicated_worker: None, + version: 1443253234253454, + }, + windmill_queue::PushArgs { args: &args, extra: None }, + "admin", + "admin@windmill.dev", + "admin".to_owned(), + Some("trigger.dependents.to.recompute.dependencies"), + // Debounce period + Some(chrono::Utc::now() + chrono::Duration::seconds(5)), + None, + None, + None, + None, + None, + false, + false, + None, + true, + Some("dependency".into()), + None, + None, + None, + None, + false, + None, + None, + ) + .await + .unwrap(); + new_tx.commit().await.unwrap(); + + // let handle = { + // // let mut completed = listen_for_completed_jobs(&db).await; + // let db2 = db.clone(); + // // let uuid = flow_id.clone(); + // tokio::spawn(async move { + // in_test_worker( + // &db2, + // tokio::time::sleep(tokio::time::Duration::from_secs(60)), + // // completed.find(&uuid), + // port, + // ) + // .await; + // }) + // }; + + let db2 = db.clone(); + in_test_worker( + &db2, + async { + // This job should execute and then try to start another job that will get debounced. + RunJob::from(windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre/leaf_right".to_owned(), + hash: 333403.into(), + language: windmill_common::scripts::ScriptLang::Python3, + dedicated_worker: None, + }) + // .arg("dbg_djob_sleep", serde_json::json!(10)) + .run_until_complete(&db, port) + .await; + + RunJob::from(windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre/leaf_left".to_owned(), + hash: 333400.into(), + language: windmill_common::scripts::ScriptLang::Python3, + dedicated_worker: None, + }) + // So set it to this long + .arg("dbg_djob_sleep", serde_json::json!(10)) + .run_until_complete(&db, port) + .await; + + completed.next().await; // leaf_right + completed.next().await; // leaf_left + completed.next().await; // importer + completed.next().await; // importer + }, + port, + ) + .await; + } + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_queue") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 0 + ); + + let r = sqlx::query_scalar!("SELECT runnable_id FROM v2_job ORDER BY created_at DESC") + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(r.len(), 4); + assert!(r.contains(&Some(1))); + assert!(r.contains(&Some(333400))); + assert!(r.contains(&Some(333403))); + assert!(r.contains(&Some(1443253234253454))); + + Ok(()) + } + + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_2(db: sqlx::Pool) -> anyhow::Result<()> { + let (_client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // Function to create a dependency job + let create_dependency_job = + |delay, + nodes_to_relock, + db: sqlx::Pool, + version, + debounce_job_id_o| async move { + let mut args = std::collections::HashMap::new(); + args.insert( + "dbg_sleep_between_pull_and_debounce_key_removal".to_owned(), + windmill_common::worker::to_raw_value(&delay), + ); + + args.insert( + 
"nodes_to_relock".to_owned(), + windmill_common::worker::to_raw_value(&nodes_to_relock), + ); + + args.insert( + "triggered_by_relative_import".to_string(), + windmill_common::worker::to_raw_value(&()), + ); + + args.insert( + "dbg_create_job_for_unexistant_flow_version".to_string(), + windmill_common::worker::to_raw_value(&()), + ); + + let (job_uuid, new_tx) = windmill_queue::push( + &db, + windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + "test-workspace", + windmill_common::jobs::JobPayload::FlowDependencies { + path: "f/dre/flow".to_owned(), + dedicated_worker: None, + version, + }, + windmill_queue::PushArgs { args: &args, extra: None }, + "admin", + "admin@windmill.dev", + "admin".to_owned(), + Some("trigger.dependents.to.recompute.dependencies"), + Some(chrono::Utc::now()), // Schedule immediately + None, + None, + None, + None, + None, + false, + false, + None, + true, + Some("dependency".into()), + None, + None, + None, + None, + false, + None, + debounce_job_id_o, + ) + .await + .unwrap(); + + new_tx.commit().await.unwrap(); + job_uuid + }; + + // Push the first dependency job + let job1 = + create_dependency_job(2, vec!["a", "b"], db.clone(), 1443253234253454, None).await; + + let db2 = db.clone(); + in_test_worker( + &db2, + async { + windmill_common::worker::update_min_version( + &windmill_common::worker::Connection::Sql(db2.clone()), + ) + .await; + // Small delay to ensure the job is marked as running + tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; + + // Now is the time when the job is pulled, but debounce_key is not yet cleared. + { + assert!(sqlx::query_scalar!( + "SELECT running FROM v2_job_queue WHERE id = $1", + job1 + ) + .fetch_one(&db) + .await + .unwrap()); + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM debounce_key") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 1 + ); + } + + // Block all tests using this variable until we are done + // let mut min_v = windmill_common::worker::MIN_VERSION_IS_AT_LEAST_1_440 + // .write() + // .await; + + // // Save initial min_v value; + // let initi_min_v = *min_v; + + // // Make it true for this test. 
+ // *min_v = true; + + // Now push a second dependency job while the first is being processed + // This should trigger the race condition handling code + let job2 = + create_dependency_job(0, vec!["b", "c"], db.clone(), 1, Some(job1)).await; + + // Set it back to initial + // *min_v = initi_min_v; + + // Unblock all other tests + // drop(min_v); + + // Process the first job completion, and the second job should also get debounced by this one + completed.next().await; + + // Verify that both jobs were created and processed + assert_eq!(job1, job2, "Second job should be debounced"); + }, + port, + ) + .await; + + assert_eq!( + vec![1443253234253454, 1], + sqlx::query_scalar!("SELECT versions FROM flow WHERE path = 'f/dre/flow'") + .fetch_one(&db) + .await + .unwrap() + ); + + // Verify cleanup - all debounce entries should be cleaned up + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_key") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "All debounce_key entries should be cleaned up after job completion" + ); + + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_stale_data") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "All debounce_stale_data entries should be cleaned up after job completion" + ); + + // Verify locks + { + assert_eq!( + sqlx::query_scalar!("SELECT jsonb_array_elements(value->'modules')->'value'->>'lock' AS lock FROM flow") + .fetch_all(&db) + .await + .unwrap(), + vec![ + Some("# py: 3.11\nbottle==0.13.2".into()), + Some("# py: 3.11\nbottle==0.13.2\ntiny==0.1.3".into()), + Some("# py: 3.11\ntiny==0.1.3".into()) + ] + ); + } + + Ok(()) + } + /// 2. Same as the second test, however the first flow djob will take longer than the second debounce. + /// NOTE: This test should be run in debug mode with `private` features enabled. In release it will not work properly. + #[cfg(all(feature = "python", feature = "private"))] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + // #[windmill::all_min_versions] + async fn test_3(db: sqlx::Pool<sqlx::Postgres>) -> anyhow::Result<()> { + // This test checks that the concurrency limit works correctly and there are no race conditions. + + use windmill_common::worker::Connection; + let (_client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // At this point we should have two + let mut job_ids = vec![]; + let push_job = |delay, version, db, nodes_to_relock, debounce_job_id_o| async move { + let mut args = std::collections::HashMap::new(); + args.insert( + "dbg_djob_sleep".to_owned(), + // First one will create delay for 5 seconds + // The second will have no delay at all. + windmill_common::worker::to_raw_value(&delay), + ); + + args.insert( + "nodes_to_relock".to_owned(), + windmill_common::worker::to_raw_value(&nodes_to_relock), + ); + + args.insert( + "triggered_by_relative_import".to_string(), + windmill_common::worker::to_raw_value(&()), + ); + + let (job_uuid, new_tx) = windmill_queue::push( + &db, + windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + "test-workspace", + windmill_common::jobs::JobPayload::FlowDependencies { + path: "f/dre/flow".to_owned(), + dedicated_worker: None, + // In newest versions we pass the current version to the djob + // version: 1443253234253454, + version, + }, + windmill_queue::PushArgs { args: &args, extra: None }, + "admin", + "admin@windmill.dev", + "admin".to_owned(), + Some("trigger.dependents.to.recompute.dependencies"), + // Schedule for now.
+ Some(chrono::Utc::now()), + None, + None, + None, + None, + None, + false, + false, + None, + true, + Some("dependency".into()), + None, + None, + None, + None, + false, + None, + debounce_job_id_o, + ) + .await + .unwrap(); + + new_tx.commit().await.unwrap(); + + job_uuid + }; + + // Push first + job_ids.push(push_job(5, 1443253234253454, db.clone(), ["a", "b"], None).await); + + // Verify debounce_stale_data and debounce_key + { + let q = sqlx::query!("SELECT COUNT(*) FROM debounce_key") + .fetch_all(&db) + .await + .unwrap(); + + // Should be single entry + assert_eq!(q.len(), 1); + } + + // Start the first one in the background + let handle = { + let mut completed = listen_for_completed_jobs(&db).await; + let db2 = db.clone(); + tokio::spawn(async move { + in_test_worker( + &db2, + // sleep(Duration::from_secs(7)), + completed.next(), // Only wait for the single job. We are going to spawn another worker for second one. + port, + ) + .await; + }) + }; + + // Wait for the job to be created and started + // This way next job is not going to be consumed by the first one. + sleep(Duration::from_secs(2)).await; + + // Push second + job_ids.push(push_job(0, 1, db.clone(), ["b", "c"], None).await); + + // Wait for the second one to finish in separate worker. + // in_test_worker(&db, completed.next(), port).await; + in_test_worker( + &db, + async { + // First job will be pulled + completed.next().await; + // However since we have concurrency limit enabled it will get rescheduled by creation of new djob. + // So we have to wait for that one as well. + completed.next().await; + }, + port, + ) + .await; + + // Wait for the first one + handle.await.unwrap(); + + // Verify locks + { + assert_eq!( + sqlx::query_scalar!("SELECT jsonb_array_elements(value->'modules')->'value'->>'lock' AS lock FROM flow") + .fetch_all(&db) + .await + .unwrap(), + vec![ + Some("# py: 3.11\nbottle==0.13.2".into()), + Some("# py: 3.11\nbottle==0.13.2\ntiny==0.1.3".into()), + Some("# py: 3.11\ntiny==0.1.3".into()) + ] + ); + } + // Verify that we have expected outcome + { + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job",) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 2 + ); + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_completed",) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 2 + ); + // Check that two jobs were executed sequentially + assert!(sqlx::query_scalar!( + " +SELECT + j1.completed_at < j2.started_at +FROM + v2_job_completed j1, + v2_job_completed j2 +WHERE + j1.id = $1 + AND j2.id = $2", + job_ids[0], + job_ids[1], + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap()); + } + Ok(()) + } + + // TODO: + // test that update or create flow that should bypass debouncing + } + + /// ## Testing for Apps + /// For apps we are going to do similar tests that we did for flows + mod apps { + use crate::common::{in_test_worker, init_client, listen_for_completed_jobs}; + use crate::job_debouncing::trigger_djob_for; + use std::time::Duration; + use tokio::time::sleep; + use tokio_stream::StreamExt; + + /// 1. LLF and RLF create two djobs for flow at the same and fall into single debounce + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_1(db: sqlx::Pool) -> anyhow::Result<()> { + // This tests if debouncing and consolidation works. 
+ // Also makes sure that the dependency job does not create a new app version + + let (client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 0 + ); + + // Trigger both at the same time. + // It will create two immediate dependency jobs + { + trigger_djob_for( + &client, + "f/dre_app/leaf_left", + "0000000000069CF8", + Some("#requirements:\n#bottle==0.13.2\ndef main():\npass".into()), + ) + .await; + + trigger_djob_for( + &client, + "f/dre_app/leaf_right", + "0000000000069CFB", + Some("#requirements:\n#tiny==0.1.3\ndef main():\npass".into()), + ) + .await; + } + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 2 + ); + + // Spawn single worker. + in_test_worker( + &db, + async { + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre_app/leaf_left" + ); + + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre_app/leaf_right" + ); + + // Verify there is only one queued job that is scheduled for at least 3s ahead. + { + let q = sqlx::query_scalar!( + "SELECT (scheduled_for - created_at) FROM v2_job_queue" + ) + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(1, q.len()); + assert!(dbg!(q[0].unwrap().microseconds) > 2_000_000); + } + + // Verify debounce_stale_data and debounce_key + { + let q = sqlx::query!( + "SELECT + dsd.to_relock, + dk.key + FROM debounce_key dk + JOIN debounce_stale_data dsd ON dk.job_id = dsd.job_id" + ) + .fetch_all(&db) + .await + .unwrap(); + + // Should be single entry + assert!(q.len() == 1); + + // This verifies that all nodes_to_relock are consolidated correctly + // AND there are no duplicates + assert_eq!( + q[0].to_relock.clone().unwrap(), + vec!["a".to_owned(), "b".to_owned(), "c".to_owned()] + ); + + // Should be workspace specific and these specific tests cover only dependency job debouncing + assert_eq!( + q[0].key.clone(), + "test-workspace:f/dre_app/app:dependency".to_owned(), + ); + } + + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre_app/app" + ); + }, + port, + ) + .await; + + // Verify App states + { + let q = dbg!(sqlx::query_scalar!( + "SELECT versions FROM app WHERE path = 'f/dre_app/app'" + ) + .fetch_one(&db) + .await + .unwrap()); + + assert_eq!(2, q.len()); + + // There is also supposed to be this many app_versions + assert_eq!( + 2, + sqlx::query_scalar!("SELECT COUNT(*) FROM app_version WHERE app_id = '2'") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + } + + // Verify cleanup + { + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_key") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_stale_data") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + } + + Ok(()) + } + + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_left(db: sqlx::Pool<sqlx::Postgres>) -> anyhow::Result<()> { + use crate::common::RunJob; + + // TODO: We don't care about timer.
If there is no timer, it will be set automatically for djobs?? + let (_client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + let mut args = std::collections::HashMap::new(); + args.insert( + "dbg_djob_sleep".to_owned(), + // Execution should take this seconds + windmill_common::worker::to_raw_value(&20), + ); + args.insert( + "triggered_by_relative_import".to_owned(), + // Execution should take this seconds + windmill_common::worker::to_raw_value(&()), + ); + + let (_flow_id, new_tx) = windmill_queue::push( + &db, + windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + "test-workspace", + windmill_common::jobs::JobPayload::AppDependencies { + path: "f/dre_app/app".to_owned(), + version: 0, + }, + windmill_queue::PushArgs { args: &args, extra: None }, + "admin", + "admin@windmill.dev", + "admin".to_owned(), + Some("trigger.dependents.to.recompute.dependencies"), + // Debounce period + Some(chrono::Utc::now() + chrono::Duration::seconds(5)), + None, + None, + None, + None, + None, + false, + false, + None, + true, + Some("dependency".into()), + None, + None, + None, + None, + false, + None, + None, + ) + .await + .unwrap(); + new_tx.commit().await.unwrap(); + + // let mut handle = { + // let mut completed = listen_for_completed_jobs(&db).await; + // let db2 = db.clone(); + // let uuid = flow_id.clone(); + // tokio::spawn(async move { + // in_test_worker( + // &db2, + // // tokio::time::sleep(tokio::time::Duration::from_secs(60)), + // async move { + // completed.find(&uuid).await; + // }, + // port, + // ) + // .await; + // }) + // }; + + let db2 = db.clone(); + in_test_worker( + &db2, + async { + // This job should execute and then try to start another job that will get debounced. + RunJob::from(windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre_app/leaf_right".to_owned(), + hash: 433403.into(), + language: windmill_common::scripts::ScriptLang::Python3, + dedicated_worker: None, + }) + // .arg("dbg_djob_sleep", serde_json::json!(10)) + .run_until_complete(&db, port) + .await; + + RunJob::from(windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre_app/leaf_left".to_owned(), + hash: 433400.into(), + language: windmill_common::scripts::ScriptLang::Python3, + dedicated_worker: None, + }) + // So set it to this long + .arg("dbg_djob_sleep", serde_json::json!(10)) + .run_until_complete(&db, port) + .await; + + completed.next().await; // leaf_right + completed.next().await; // leaf_left + completed.next().await; // importer + completed.next().await; // importer + }, + port, + ) + .await; + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_queue") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 0 + ); + + let r = sqlx::query_scalar!("SELECT runnable_id FROM v2_job ORDER BY created_at DESC") + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(r.len(), 4); + assert!(r.contains(&Some(9))); + assert!(r.contains(&Some(433400))); + assert!(r.contains(&Some(433403))); + assert!(r.contains(&Some(0))); + + // handle.await.unwrap(); + + Ok(()) + } + /// 2. Same as second test, however first app djob will take longer than second debounce. + /// NOTE: This test should be ran in debug mode. In release it will not work properly. + #[cfg(all(feature = "python", feature = "private"))] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_3(db: sqlx::Pool) -> anyhow::Result<()> { + // This tests checks if concurrency limit works correcly and there is no race conditions. 
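+            // (Sketch of the interleaving this test expects under the per-path
+            // concurrency limit, given the 5s/0s delays and the 2s gap between pushes:
+            //     job1: pulled, sleeps ~5s, completed_at = t1
+            //     job2: pushed at ~t0+2s, held in the queue until t1, so started_at > t1
+            // which is exactly the `j1.completed_at < j2.started_at` assertion at the end.)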
+ let (_client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // At this point we should have two + let mut job_ids = vec![]; + let push_job = |delay, db| async move { + let mut args = std::collections::HashMap::new(); + args.insert( + "dbg_djob_sleep".to_owned(), + // First one will create delay for 5 seconds + // The second will have no delay at all. + windmill_common::worker::to_raw_value(&delay), + ); + + args.insert( + "triggered_by_relative_import".to_string(), + windmill_common::worker::to_raw_value(&()), + ); + + let (job_uuid, new_tx) = windmill_queue::push( + &db, + windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + "test-workspace", + windmill_common::jobs::JobPayload::AppDependencies { + path: "f/dre_app/app".to_owned(), + // In newest versions we pass the current version to the djob + version: 0, + }, + windmill_queue::PushArgs { args: &args, extra: None }, + "admin", + "admin@windmill.dev", + "admin".to_owned(), + Some("trigger.dependents.to.recompute.dependencies"), + // Schedule for now. + Some(chrono::Utc::now()), + None, + None, + None, + None, + None, + false, + false, + None, + true, + Some("dependency".into()), + None, + None, + None, + None, + false, + None, + None, + ) + .await + .unwrap(); + + new_tx.commit().await.unwrap(); + + job_uuid + }; + + // TODO: Verify concurrency key. + // Push first + job_ids.push(push_job(5, db.clone()).await); + + // Verify debounce_stale_data and debounce_key + { + let q = sqlx::query!("SELECT COUNT(*) FROM debounce_key") + .fetch_all(&db) + .await + .unwrap(); + + // Should be single entry + assert_eq!(q.len(), 1); + } + + // Start the first one in the background + let handle = { + let mut completed = listen_for_completed_jobs(&db).await; + let db2 = db.clone(); + tokio::spawn(async move { + in_test_worker( + &db2, + // sleep(Duration::from_secs(7)), + completed.next(), // Only wait for the single job. We are going to spawn another worker for second one. + port, + ) + .await; + }) + }; + + // Wait for the job to be created and started + // This way next job is not going to be consumed by the first one. + sleep(Duration::from_secs(2)).await; + + // Push second + job_ids.push(push_job(0, db.clone()).await); + + // Wait for the second one to finish in separate worker. + // in_test_worker(&db, completed.next(), port).await; + in_test_worker( + &db, + async { + // First job will be pulled + completed.next().await; + // However since we have concurrency limit enabled it will get rescheduled by creation of new djob. + // So we have to wait for that one as well. 
+ completed.next().await; + }, + port, + ) + .await; + + // Wait for the first one + handle.await.unwrap(); + + // Verify that we have the expected outcome + { + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job",) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 2 + ); + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_completed",) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 2 + ); + // Check that the two jobs were executed sequentially + assert!(sqlx::query_scalar!( + " +SELECT + j1.completed_at < j2.started_at +FROM + v2_job_completed j1, + v2_job_completed j2 +WHERE + j1.id = $1 + AND j2.id = $2", + job_ids[0], + job_ids[1], + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap()); + } + Ok(()) + } + } + + // TODO: Test debounce reassignment works + + /// ## Testing for Scripts + mod scripts { + use crate::common::{in_test_worker, init_client, listen_for_completed_jobs}; + use crate::job_debouncing::trigger_djob_for; + use std::time::Duration; + use tokio::time::sleep; + use tokio_stream::StreamExt; + + /// 1. LLF and RLF create two djobs for the script at the same time and fall into a single debounce + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + // TODO: Same test, but the script fails. + async fn test_1(db: sqlx::Pool<sqlx::Postgres>) -> anyhow::Result<()> { + // This tests if debouncing and consolidation works. + // Also makes sure that the dependency job does not create a new script version + let (client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // Verify lock is empty + { + assert_eq!( + sqlx::query_scalar!( + "SELECT lock FROM script WHERE path = 'f/dre_script/script'" + ) + .fetch_one(&db) + .await + .unwrap(), + Some("".into()) + ); + } + + // Trigger both at the same time. + { + trigger_djob_for( + &client, + "f/dre_script/leaf_left", + "0000000000082398", + Some("#requirements:\n#bottle==0.13.2\ndef main():\npass".into()), + ) + .await; + trigger_djob_for( + &client, + "f/dre_script/leaf_right", + "000000000008239B", + Some("#requirements:\n#tiny==0.1.3\ndef main():\npass".into()), + ) + .await; + } + + sleep(Duration::from_secs(1)).await; + + in_test_worker( + &db, + async { + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre_script/leaf_left" + ); + + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre_script/leaf_right" + ); + + // handle.await.unwrap(); + + // Let jobs propagate + + tokio::select!( + _ = async { + while sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_queue WHERE running = false") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + == 0 + { + sleep(Duration::from_secs(1)).await; + } + } => {}, + _ = sleep(Duration::from_secs(60)) => { panic!("Timeout") } + ); + // Verify there is only one queued job that is scheduled for at least 3s ahead.
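+                // (For scripts the debounce key has the same shape as for flows and
+                // apps; a sketch mirroring lock_debounce_key in windmill-common:
+                //     let key = format!("{w_id}:{runnable_path}:dependency");
+                //     // => "test-workspace:f/dre_script/script:dependency"
+                // which is what the debounce_key assertion below expects.)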
+ { + for r in + sqlx::query_scalar!("SELECT id FROM v2_job_queue WHERE running = false") + .fetch_all(&db) + .await + .unwrap() + { + dbg!( + sqlx::query!("SELECT runnable_path FROM v2_job WHERE id = $1", r) + .fetch_all(&db) + .await + .unwrap() + ); + } + for r in sqlx::query_scalar!("SELECT id FROM v2_job_completed") + .fetch_all(&db) + .await + .unwrap() + { + dbg!( + sqlx::query!("SELECT runnable_path FROM v2_job WHERE id = $1", r) + .fetch_all(&db) + .await + .unwrap() + ); + } + + dbg!(sqlx::query!("SELECT runnable_path FROM v2_job") + .fetch_all(&db) + .await + .unwrap()); + + let q = sqlx::query_scalar!( + "SELECT (scheduled_for - created_at) FROM v2_job_queue WHERE running = false" + ) + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(1, q.len()); + assert!(dbg!(q[0].unwrap().microseconds) > 1_000_000 /* 1 second */); + } + + // Verify debounce_stale_data and debounce_key + { + let q = sqlx::query_scalar!("SELECT key FROM debounce_key") + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(q.len(), 1); + + assert_eq!( + q[0].clone(), + "test-workspace:f/dre_script/script:dependency".to_owned(), + ); + + // Stale data is empty for scripts + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM debounce_stale_data") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 0 + ); + } + + // Wait until debounce delay is complete + // sleep(Duration::from_secs(6)).await; + + assert_eq!( + &sqlx::query_scalar!( + "SELECT runnable_path FROM v2_job WHERE id = $1", + completed.next().await.unwrap() + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + "f/dre_script/script" + ); + }, + port, + ) + .await; + + // completed.next().await.unwrap(); + + // Verify + { + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from v2_job_queue") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + assert_eq!( + vec![533404], + dbg!(sqlx::query_scalar!( + "SELECT hash FROM script WHERE path = 'f/dre_script/script' AND archived = true" + ) + .fetch_all(&db) + .await + .unwrap()) + ); + + assert_ne!( + 533404, + sqlx::query_scalar!( + "SELECT hash FROM script WHERE path = 'f/dre_script/script' AND archived = false" + ) + .fetch_one(&db) + .await + .unwrap() + ); + + assert_eq!( + vec![533404], + sqlx::query_scalar!( + "SELECT parent_hashes FROM script WHERE path = 'f/dre_script/script' AND archived = false" + ) + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + } + + // Verify cleanup + { + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_key") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + assert_eq!( + 0, + sqlx::query_scalar!("SELECT COUNT(*) from debounce_stale_data") + .fetch_one(&db) + .await + .unwrap() + .unwrap() + ); + } + + // handle.await.unwrap(); + Ok(()) + } + + #[cfg(feature = "python")] + #[sqlx::test(fixtures("base", "djob_debouncing"))] + async fn test_left(db: sqlx::Pool) -> anyhow::Result<()> { + use crate::common::RunJob; + + // TODO: We don't care about timer. If there is no timer, it will be set automatically for djobs?? + let (_client, port, _s) = init_client(db.clone()).await; + let mut completed = listen_for_completed_jobs(&db).await; + + // Trigger both at the same time. 
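+            // (Unlike the flow/app variants above, the job debounced here is a plain
+            // JobPayload::Dependencies for the importer script itself, keyed by its
+            // fixture hash 533404; after relocking, the script gets a fresh hash with
+            // 533404 recorded in parent_hashes, as asserted in test_1 above.)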
+ { + let mut args = std::collections::HashMap::new(); + args.insert( + "dbg_djob_sleep".to_owned(), + // Execution should take this seconds + windmill_common::worker::to_raw_value(&20), + ); + + args.insert( + "triggered_by_relative_import".to_owned(), + // Execution should take this seconds + windmill_common::worker::to_raw_value(&()), + ); + + let (_flow_id, new_tx) = windmill_queue::push( + &db, + windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + "test-workspace", + windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre_script/script".to_owned(), + dedicated_worker: None, + language: windmill_common::scripts::ScriptLang::Python3, + hash: 533404.into(), + }, + windmill_queue::PushArgs { args: &args, extra: None }, + "admin", + "admin@windmill.dev", + "admin".to_owned(), + Some("trigger.dependents.to.recompute.dependencies"), + // Debounce period + Some(chrono::Utc::now() + chrono::Duration::seconds(5)), + None, + None, + None, + None, + None, + false, + false, + None, + true, + Some("dependency".into()), + None, + None, + None, + None, + false, + None, + None, + ) + .await + .unwrap(); + new_tx.commit().await.unwrap(); + + let db2 = db.clone(); + in_test_worker( + &db2, + async { + // This job should execute and then try to start another job that will get debounced. + RunJob::from(windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre_script/leaf_right".to_owned(), + hash: 533403.into(), + language: windmill_common::scripts::ScriptLang::Python3, + dedicated_worker: None, + }) + .run_until_complete(&db, port) + .await; + + // This one is supposed to be started after flow djob has debounced and started but haven't finished yet. + RunJob::from(windmill_common::jobs::JobPayload::Dependencies { + path: "f/dre_script/leaf_left".to_owned(), + hash: 533400.into(), + language: windmill_common::scripts::ScriptLang::Python3, + dedicated_worker: None, + }) + // So set it to this long + .arg("dbg_djob_sleep", serde_json::json!(10)) + .run_until_complete(&db, port) + .await; + + completed.next().await; // leaf_right + completed.next().await; // leaf_left + completed.next().await; // importer + completed.next().await; // importer + }, + port, + ) + .await; + } + + assert_eq!( + sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_queue") + .fetch_one(&db) + .await + .unwrap() + .unwrap(), + 0 + ); + + let r = sqlx::query_scalar!("SELECT runnable_id FROM v2_job ORDER BY created_at DESC") + .fetch_all(&db) + .await + .unwrap(); + + assert_eq!(r.len(), 4); + assert!(r.contains(&Some(-221349019907577876))); + assert!(r.contains(&Some(533400))); + assert!(r.contains(&Some(533403))); + assert!(r.contains(&Some(533404))); + + Ok(()) + } + + // // TODO: we don't need scripts to have concurrency limit + // /// 3. Same as second test, however first app djob will take longer than second debounce. + // /// NOTE: This test should be ran in debug mode. In release it will not work properly. + // #[cfg(all(feature = "python", feature = "private"))] + // #[sqlx::test(fixtures("base", "djob_debouncing"))] + // async fn test_3(db: sqlx::Pool) -> anyhow::Result<()> { + // // This tests checks if concurrency limit works correcly and there is no race conditions. 
+ // let (client, port, _s) = init_client(db.clone()).await; + // let mut completed = listen_for_completed_jobs(&db).await; + + // // At this point we should have two + // let mut job_ids = vec![]; + // let push_job = |delay, db| async move { + // let mut args = std::collections::HashMap::new(); + // args.insert( + // "dbg_djob_sleep".to_owned(), + // // First one will create delay for 5 seconds + // // The second will have no delay at all. + // windmill_common::worker::to_raw_value(&delay), + // ); + + // args.insert( + // "triggered_by_relative_import".to_string(), + // windmill_common::worker::to_raw_value(&()), + // ); + + // let (job_uuid, new_tx) = windmill_queue::push( + // &db, + // windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()), + // "test-workspace", + // windmill_common::jobs::JobPayload::Dependencies { + // path: "f/dre_script/script".to_owned(), + // language: windmill_common::scripts::ScriptLang::Python3, + // dedicated_worker: None, + // hash: windmill_common::scripts::ScriptHash(533404), + // }, + // windmill_queue::PushArgs { args: &args, extra: None }, + // "admin", + // "admin@windmill.dev", + // "admin".to_owned(), + // Some("trigger.dependents.to.recompute.dependencies"), + // // Schedule for now. + // Some(chrono::Utc::now()), + // None, + // None, + // None, + // None, + // None, + // false, + // false, + // None, + // true, + // Some("dependency".into()), + // None, + // None, + // None, + // None, + // false, + // None, + // None, + // ) + // .await + // .unwrap(); + + // new_tx.commit().await.unwrap(); + + // job_uuid + // }; + + // // Push first + // job_ids.push(push_job(5, db.clone()).await); + // sleep(Duration::from_millis(300)).await; + + // // Verify debounce_stale_data and debounce_key + // { + // let q = sqlx::query_scalar!("SELECT key FROM debounce_key") + // .fetch_all(&db) + // .await + // .unwrap(); + + // assert_eq!(q.len(), 1); + + // assert_eq!( + // q[0].clone(), + // "test-workspace:f/dre_script/script:dependency".to_owned(), + // ); + + // // Stale data is empty for scripts + // assert_eq!( + // sqlx::query_scalar!("SELECT COUNT(*) FROM debounce_stale_data") + // .fetch_one(&db) + // .await + // .unwrap() + // .unwrap(), + // 0 + // ); + // } + + // // Start the first one in the background + // let handle = { + // let mut completed = listen_for_completed_jobs(&db).await; + // let db2 = db.clone(); + // tokio::spawn(async move { + // in_test_worker( + // &db2, + // // sleep(Duration::from_secs(7)), + // completed.next(), // Only wait for the single job. We are going to spawn another worker for second one. + // port, + // ) + // .await; + // }) + // }; + + // // Wait for the job to be created and started + // // This way next job is not going to be consumed by the first one. + // sleep(Duration::from_secs(1)).await; + + // // Push second + // job_ids.push(push_job(0, db.clone()).await); + + // // Wait for the second one to finish in separate worker. + // in_test_worker( + // &db, + // async { + // // First job will be pulled + // completed.next().await; + // // However since we have concurrency limit enabled it will get rescheduled by creation of new djob. + // // So we have to wait for that one as well. 
+ // completed.next().await; + // }, + // port, + // ) + // .await; + + // // Wait for the first one + // handle.await.unwrap(); + + // // Verify that we have expected outcome + // { + // assert_eq!( + // sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_queue",) + // .fetch_one(&db) + // .await + // .unwrap() + // .unwrap(), + // 0 + // ); + // // Verify lock + // { + // assert_eq!( + // sqlx::query_scalar!( + // "SELECT lock FROM script WHERE path = 'f/dre_script/script'" + // ) + // .fetch_one(&db) + // .await + // .unwrap(), + // Some("# py: 3.11\nbottle==0.13.2\ntiny==0.1.3".into()) + // ); + // } + // assert_eq!( + // sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job",) + // .fetch_one(&db) + // .await + // .unwrap() + // .unwrap(), + // 2 + // ); + + // assert_eq!( + // sqlx::query_scalar!("SELECT COUNT(*) FROM v2_job_completed",) + // .fetch_one(&db) + // .await + // .unwrap() + // .unwrap(), + // 2 + // ); + // // Check that two jobs were executed sequentially + // assert!(sqlx::query_scalar!( + // " + // SELECT + // j1.completed_at < j2.started_at + // FROM + // v2_job_completed j1, + // v2_job_completed j2 + // WHERE + // j1.id = $1 + // AND j2.id = $2", + // job_ids[0], + // job_ids[1], + // ) + // .fetch_one(&db) + // .await + // .unwrap() + // .unwrap()); + // } + // Ok(()) + // } + } + // TODO: Test git sync +} diff --git a/backend/tests/worker.rs b/backend/tests/worker.rs index 362df943a0a7f..72d753c48cfeb 100644 --- a/backend/tests/worker.rs +++ b/backend/tests/worker.rs @@ -20,7 +20,6 @@ use windmill_common::flows::InputTransform; #[cfg(any(feature = "python", feature = "deno_core"))] use windmill_common::flow_status::RestartedFrom; - use windmill_common::{ flows::FlowValue, jobs::{JobPayload, RawCode}, @@ -31,6 +30,8 @@ use common::*; #[cfg(feature = "enterprise")] use futures::StreamExt; +use windmill_common::flows::FlowModule; +use windmill_common::flows::FlowModuleValue; // async fn _print_job(id: Uuid, db: &Pool) -> Result<(), anyhow::Error> { // tracing::info!( @@ -332,9 +333,6 @@ async fn test_identity(db: Pool) -> anyhow::Result<()> { Ok(()) } -use windmill_common::flows::FlowModule; -use windmill_common::flows::FlowModuleValue; - #[cfg(feature = "deno_core")] #[sqlx::test(fixtures("base"))] async fn test_deno_flow_same_worker(db: Pool) -> anyhow::Result<()> { @@ -2083,14 +2081,14 @@ async fn test_flow_lock_all(db: Pool) -> anyhow::Result<()> { language: windmill_api_client::types::RawScriptLanguage::Bash, lock: Some(ref lock), .. - }) if lock == "") + }) if lock.is_empty()) || matches!( m.value, windmill_api_client::types::FlowModuleValue::RawScript(RawScript{ language: windmill_api_client::types::RawScriptLanguage::Go | windmill_api_client::types::RawScriptLanguage::Python3 | windmill_api_client::types::RawScriptLanguage::Deno, lock: Some(ref lock), .. 
- }) if lock.len() > 0), + }) if !lock.is_empty()), "{:?}", m.value ); }); @@ -2749,7 +2747,7 @@ async fn test_result_format(db: Pool) -> anyhow::Result<()> { assert_eq!(job_result.get(), correct_result); let response = windmill_api::jobs::run_wait_result( - &db.into(), + &db, Uuid::parse_str(ordered_result_job_id).unwrap(), "test-workspace".to_string(), None, @@ -2760,8 +2758,7 @@ async fn test_result_format(db: Pool) -> anyhow::Result<()> { let result: Box = serde_json::from_slice( &axum::body::to_bytes(response.into_body(), usize::MAX) .await - .unwrap() - .to_vec(), + .unwrap(), ) .unwrap(); assert_eq!(result.get(), correct_result); @@ -2805,7 +2802,7 @@ async fn test_job_labels(db: Pool) -> anyhow::Result<()> { restarted_from: None, }) .arg("world", json!("you")) - .run_until_complete_with(&db, port, |id| async move { + .run_until_complete_with(db, port, |id| async move { sqlx::query!( "UPDATE v2_job SET labels = $2 WHERE id = $1 AND $2::TEXT[] IS NOT NULL", id, @@ -2871,7 +2868,7 @@ async fn test_workflow_as_code(db: Pool) -> anyhow::Result<()> { // workflow as code require at least 2 workers: let db = &db; in_test_worker( - &db, + db, async move { let job = RunJob::from(JobPayload::Code(RawCode { language: ScriptLang::Python3, @@ -2879,7 +2876,7 @@ async fn test_workflow_as_code(db: Pool) -> anyhow::Result<()> { ..RawCode::default() })) .arg("n", json!(3)) - .run_until_complete(&db, port) + .run_until_complete(db, port) .await; assert_eq!(job.json_result().unwrap(), json!(["OK", 3])); diff --git a/backend/windmill-api/src/apps.rs b/backend/windmill-api/src/apps.rs index 15a89d762f585..1cd0750919df5 100644 --- a/backend/windmill-api/src/apps.rs +++ b/backend/windmill-api/src/apps.rs @@ -60,7 +60,7 @@ use windmill_common::{ users::username_to_permissioned_as, utils::{ http_get_from_hub, not_found_if_none, paginate, query_elems_from_hub, require_admin, - Pagination, RunnableKind, StripPath, + Pagination, RunnableKind, StripPath, WarnAfterExt, }, variables::{build_crypt, build_crypt_with_key_suffix, encrypt}, worker::{to_raw_value, CLOUD_HOSTED}, @@ -1240,6 +1240,7 @@ async fn create_app_internal<'a>( Some(&authed.clone().into()), false, None, + None, ) .await?; tracing::info!("Pushed app dependency job {}", dependency_job_uuid); @@ -1523,6 +1524,14 @@ async fn update_app_internal<'a>( path.to_owned() }; let v_id = if let Some(nvalue) = &ns.value { + // Row lock debounce key for path. We need this to make all updates of runnables sequential and predictable. 
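+    // (How this is expected to behave, per the pieces visible in this diff:
+    // lock_debounce_key runs `SELECT job_id FROM debounce_key WHERE key = $1 FOR UPDATE`
+    // inside `tx`, so concurrent updates of the same app path queue up on that row;
+    // the 60s timeout bounds the wait (Elapsed converts into Error via the new From
+    // impl in error.rs) and WarnAfterExt logs if acquisition takes more than 10s.)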
+ tokio::time::timeout( + core::time::Duration::from_secs(60), + windmill_common::jobs::lock_debounce_key(&w_id, &npath, &mut tx), + ) + .warn_after_seconds(10) + .await??; + let app_id = sqlx::query_scalar!( "SELECT id FROM app WHERE path = $1 AND workspace_id = $2", npath, @@ -1620,6 +1629,7 @@ async fn update_app_internal<'a>( Some(&authed.clone().into()), false, None, + None, ) .await?; tracing::info!("Pushed app dependency job {}", dependency_job_uuid); @@ -1938,6 +1948,7 @@ async fn execute_component( None, false, end_user_email, + None, ) .await?; tx.commit().await?; diff --git a/backend/windmill-api/src/flows.rs b/backend/windmill-api/src/flows.rs index c983fb8d1f008..4d1e7aecc1ee4 100644 --- a/backend/windmill-api/src/flows.rs +++ b/backend/windmill-api/src/flows.rs @@ -32,7 +32,7 @@ use sql_builder::prelude::*; use sqlx::{FromRow, Postgres, Transaction}; use windmill_audit::audit_oss::audit_log; use windmill_audit::ActionKind; -use windmill_common::utils::query_elems_from_hub; +use windmill_common::utils::{query_elems_from_hub, WarnAfterExt}; use windmill_common::worker::{to_raw_value, CLOUD_HOSTED}; use windmill_common::HUB_BASE_URL; use windmill_common::{ @@ -546,6 +546,7 @@ async fn create_flow( Some(&authed.clone().into()), false, None, + None, ) .await?; @@ -884,6 +885,15 @@ async fn update_flow( .await?; } + // Row lock debounce key for path. We need this to make all updates of runnables sequential and predictable. + tokio::time::timeout( + core::time::Duration::from_secs(60), + windmill_common::jobs::lock_debounce_key(&w_id, &nf.path, &mut tx), + ) + .warn_after_seconds(10) + .await??; + + // This will lock anyone who is trying to iterate on flow_versions with given path and parameters. let version = sqlx::query_scalar!( "INSERT INTO flow_version (workspace_id, path, value, schema, created_by) VALUES ($1, $2, $3, $4::text::json, $5) RETURNING id", w_id, @@ -900,6 +910,7 @@ async fn update_flow( )) })?; + // TODO: This should happen only after we are done with dependency job. 
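+    // (Context for the TODO, as a reading of the surrounding code: `array_append`
+    // makes the new version the tail of `flow.versions` immediately, i.e. visible
+    // as latest before its locks are computed; the dependency job pushed further
+    // down is what fills the locks in afterwards.)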
sqlx::query!( "UPDATE flow SET versions = array_append(versions, $1) WHERE path = $2 AND workspace_id = $3", version, nf.path, w_id @@ -1014,8 +1025,10 @@ async fn update_flow( Some(&authed.clone().into()), false, None, + None, ) .await?; + sqlx::query!( "UPDATE flow SET dependency_job = $1 WHERE path = $2 AND workspace_id = $3", dependency_job_uuid, diff --git a/backend/windmill-api/src/jobs.rs b/backend/windmill-api/src/jobs.rs index 47c3260fc1121..e54a61df9fd67 100644 --- a/backend/windmill-api/src/jobs.rs +++ b/backend/windmill-api/src/jobs.rs @@ -4101,6 +4101,7 @@ pub async fn run_flow_by_path_inner( push_authed.as_ref(), false, None, + None, ) .await?; @@ -4217,6 +4218,7 @@ pub async fn restart_flow( Some(&authed.clone().into()), false, None, + None, ) .await?; tx.commit().await?; @@ -4320,6 +4322,7 @@ pub async fn run_script_by_path_inner( push_authed.as_ref(), false, None, + None, ) .await?; tx.commit().await?; @@ -4473,6 +4476,7 @@ pub async fn run_workflow_as_code( push_authed.as_ref(), false, None, + None, ) .await?; @@ -5017,6 +5021,7 @@ pub async fn run_wait_result_job_by_path_get( push_authed.as_ref(), false, None, + None, ) .await?; tx.commit().await?; @@ -5170,6 +5175,7 @@ pub async fn run_wait_result_script_by_path_internal( push_authed.as_ref(), false, None, + None, ) .await?; tx.commit().await?; @@ -5287,6 +5293,7 @@ pub async fn run_wait_result_script_by_hash( push_authed.as_ref(), false, None, + None, ) .await?; tx.commit().await?; @@ -5599,6 +5606,7 @@ pub async fn run_wait_result_flow_by_path_internal( push_authed.as_ref(), false, None, + None, ) .await?; @@ -5690,6 +5698,7 @@ async fn run_preview_script( Some(&authed.clone().into()), false, None, + None, ) .await?; tx.commit().await?; @@ -5807,6 +5816,7 @@ async fn run_bundle_preview_script( Some(&authed.clone().into()), false, None, + None, ) .await?; job_id = Some(uuid); @@ -5945,6 +5955,7 @@ async fn run_dependencies_job( Some(&authed.clone().into()), false, None, + None, ) .await?; tx.commit().await?; @@ -6013,6 +6024,7 @@ async fn run_flow_dependencies_job( Some(&authed.clone().into()), false, None, + None, ) .await?; tx.commit().await?; @@ -6357,6 +6369,7 @@ async fn run_preview_flow_job( Some(&authed.clone().into()), false, None, + None, ) .await?; tx.commit().await?; @@ -6531,6 +6544,7 @@ async fn run_dynamic_select( Some(&authed.clone().into()), false, None, + None, ) .await?; tx.commit().await?; @@ -6659,6 +6673,7 @@ pub async fn run_job_by_hash_inner( push_authed.as_ref(), false, None, + None, ) .await?; tx.commit().await?; diff --git a/backend/windmill-api/src/scripts.rs b/backend/windmill-api/src/scripts.rs index 56107f055ea5d..394e5e62d284f 100644 --- a/backend/windmill-api/src/scripts.rs +++ b/backend/windmill-api/src/scripts.rs @@ -769,6 +769,14 @@ async fn create_script_internal<'c>( } }; + // Row lock debounce key for path. We need this to make all updates of runnables sequential and predictable. 
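+    // (Same serialization pattern as in flows.rs and apps.rs; condensed, the call
+    // shape used at all three sites is:
+    //     tokio::time::timeout(
+    //         core::time::Duration::from_secs(60),
+    //         windmill_common::jobs::lock_debounce_key(&w_id, &path, &mut tx),
+    //     )
+    //     .warn_after_seconds(10)
+    //     .await??;
+    // where `&path` is `&ns.path` here and the double `?` unwraps both the timeout
+    // and the inner query result.)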
+ tokio::time::timeout( + core::time::Duration::from_secs(60), + windmill_common::jobs::lock_debounce_key(&w_id, &ns.path, &mut tx), + ) + .warn_after_seconds(10) + .await??; + sqlx::query!( "INSERT INTO script (workspace_id, hash, path, parent_hashes, summary, description, \ content, created_by, schema, is_template, extra_perms, lock, language, kind, tag, \ @@ -817,6 +825,7 @@ async fn create_script_internal<'c>( ) .execute(&mut *tx) .await?; + let p_path_opt = parent_hashes_and_perms.as_ref().map(|x| x.p_path.clone()); if let Some(ref p_path) = p_path_opt { sqlx::query!( @@ -1000,6 +1009,7 @@ async fn create_script_internal<'c>( Some(&authed.clone().into()), false, None, + None, ) .await?; Ok((hash, new_tx, None)) diff --git a/backend/windmill-api/src/triggers/trigger_helpers.rs b/backend/windmill-api/src/triggers/trigger_helpers.rs index 5da92541bb220..993f4583430e6 100644 --- a/backend/windmill-api/src/triggers/trigger_helpers.rs +++ b/backend/windmill-api/src/triggers/trigger_helpers.rs @@ -861,6 +861,7 @@ async fn trigger_script_with_retry_and_error_handler( push_authed.as_ref(), false, None, + None, ) .await?; tx.commit().await?; diff --git a/backend/windmill-common/src/error.rs b/backend/windmill-common/src/error.rs index 1c41bdc9b5dab..d9644fbba9591 100644 --- a/backend/windmill-common/src/error.rs +++ b/backend/windmill-common/src/error.rs @@ -188,6 +188,12 @@ impl From for Error { } } +impl From<tokio::time::error::Elapsed> for Error { + fn from(value: tokio::time::error::Elapsed) -> Self { + Self::InternalErr(value.to_string()) + } +} + impl Error { /// https://docs.rs/anyhow/1/anyhow/struct.Error.html#display-representations pub fn alt(&self) -> String { diff --git a/backend/windmill-common/src/jobs.rs b/backend/windmill-common/src/jobs.rs index 4608fe72b36aa..edb9f1f0cf65e 100644 --- a/backend/windmill-common/src/jobs.rs +++ b/backend/windmill-common/src/jobs.rs @@ -327,6 +327,7 @@ pub enum JobPayload { path: String, apply_preprocessor: bool, }, + ScriptHash { hash: ScriptHash, path: String, @@ -339,51 +340,71 @@ pub enum JobPayload { priority: Option<i16>, apply_preprocessor: bool, }, + FlowScript { id: FlowNodeId, // flow_node(id). language: ScriptLang, + /// Override default concurrency key custom_concurrency_key: Option<String>, + /// How many jobs can run at the same time concurrent_limit: Option<i32>, + /// In seconds concurrency_time_window_s: Option<i32>, cache_ttl: Option<i32>, dedicated_worker: Option<bool>, path: String, }, + FlowNode { id: FlowNodeId, // flow_node(id). path: String, // flow node inner path (e.g. `outer/branchall-42`). }, + AppScript { id: AppScriptId, // app_script(id). path: Option<String>, language: ScriptLang, cache_ttl: Option<i32>, }, + Code(RawCode), + + /// Script Dependency Job Dependencies { path: String, hash: ScriptHash, language: ScriptLang, dedicated_worker: Option<bool>, }, + + /// Flow Dependency Job FlowDependencies { path: String, dedicated_worker: Option<bool>, version: i64, }, + + /// App Dependency Job AppDependencies { path: String, version: i64, }, + + /// Flow Dependency Job, exposed via the API. Requirements can be partially or fully predefined RawFlowDependencies { path: String, flow_value: FlowValue, }, + + /// Dependency Job, exposed via the API. Requirements can be predefined RawScriptDependencies { script_path: String, + /// Will reflect raw requirements content (e.g.
requirements.txt) content: String, language: ScriptLang, }, + + /// Flow Job Flow { path: String, dedicated_worker: Option<bool>, @@ -400,6 +421,8 @@ pub enum JobPayload { path: Option<String>, restarted_from: Option<RestartedFrom>, }, + + /// Flow consisting of a single script SingleStepFlow { path: String, hash: Option<ScriptHash>, @@ -545,7 +568,7 @@ pub async fn script_path_to_payload<'e>( custom_concurrency_key: concurrency_key, concurrent_limit, concurrency_time_window_s, - cache_ttl: cache_ttl, + cache_ttl, language, dedicated_worker, priority, @@ -775,3 +798,26 @@ pub async fn check_tag_available_for_workspace_internal( return Ok(()); } + +pub async fn lock_debounce_key<'c>( + w_id: &str, + runnable_path: &str, + tx: &mut sqlx::Transaction<'c, sqlx::Postgres>, +) -> error::Result<Option<Uuid>> { + let key = format!("{w_id}:{runnable_path}:dependency"); + + tracing::debug!( + workspace_id = %w_id, + runnable_path = %runnable_path, + debounce_key = %key, + "Locking debounce_key for dependency job scheduling" + ); + + sqlx::query_scalar!( + "SELECT job_id FROM debounce_key WHERE key = $1 FOR UPDATE", + &key + ) + .fetch_optional(&mut **tx) + .await + .map_err(error::Error::from) +} diff --git a/backend/windmill-common/src/scripts.rs b/backend/windmill-common/src/scripts.rs index 32ed0c3c2d3cb..9c1af245d8809 100644 --- a/backend/windmill-common/src/scripts.rs +++ b/backend/windmill-common/src/scripts.rs @@ -9,6 +9,7 @@ use std::{ fmt::{self, Display}, hash::{Hash, Hasher}, + ops::Deref, str::FromStr, }; @@ -131,6 +132,13 @@ impl FromStr for ScriptLang { #[sqlx(transparent)] pub struct ScriptHash(pub i64); +impl Deref for ScriptHash { + type Target = i64; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + impl Into<u64> for ScriptHash { fn into(self) -> u64 { self.0 as u64 @@ -217,7 +225,10 @@ const PREVIEW_IS_ESM_CODEBASE_HASH: i64 = -44; const PREVIEW_IS_TAR_ESM_CODEBASE_HASH: i64 = -45; pub fn is_special_codebase_hash(hash: i64) -> bool { - hash == PREVIEW_IS_CODEBASE_HASH || hash == PREVIEW_IS_TAR_CODEBASE_HASH || hash == PREVIEW_IS_ESM_CODEBASE_HASH || hash == PREVIEW_IS_TAR_ESM_CODEBASE_HASH + hash == PREVIEW_IS_CODEBASE_HASH + || hash == PREVIEW_IS_TAR_CODEBASE_HASH + || hash == PREVIEW_IS_ESM_CODEBASE_HASH + || hash == PREVIEW_IS_TAR_ESM_CODEBASE_HASH } pub fn codebase_to_hash(is_tar: bool, is_esm: bool) -> i64 { @@ -236,7 +247,6 @@ pub fn codebase_to_hash(is_tar: bool, is_esm: bool) -> i64 { } } - pub fn hash_to_codebase_id(job_id: &str, hash: i64) -> Option<String> { match hash { PREVIEW_IS_CODEBASE_HASH => Some(job_id.to_string()), @@ -247,7 +257,6 @@ pub fn hash_to_codebase_id(job_id: &str, hash: i64) -> Option<String> { } } - pub struct CodebaseInfo { pub is_tar: bool, pub is_esm: bool, @@ -714,3 +723,91 @@ pub fn hash_script(ns: &NewScript) -> i64 { ns.hash(&mut dh); dh.finish() as i64 } + +pub async fn clone_script<'c>( + base_hash: ScriptHash, + w_id: &str, + deployment_message: Option<String>, + tx: &mut sqlx::Transaction<'c, sqlx::Postgres>, +) -> crate::error::Result<i64> { + let s = + sqlx::query_as::<_, Script>("SELECT * FROM script WHERE hash = $1 AND workspace_id = $2") + .bind(base_hash.0) + .bind(w_id) + .fetch_one(&mut **tx) + .await?; + + let ns = NewScript { + path: s.path.clone(), + parent_hash: Some(base_hash), + summary: s.summary, + description: s.description, + content: s.content, + schema: s.schema, + is_template: Some(s.is_template), + // TODO: Make it either None everywhere (particularly when raw reqs are calculated) + // Or handle this case and conditionally make Some (only with raw reqs) + lock: None, + language: s.language, + kind:
Some(s.kind), + tag: s.tag, + draft_only: s.draft_only, + envs: s.envs, + concurrent_limit: s.concurrent_limit, + concurrency_time_window_s: s.concurrency_time_window_s, + cache_ttl: s.cache_ttl, + dedicated_worker: s.dedicated_worker, + ws_error_handler_muted: s.ws_error_handler_muted, + priority: s.priority, + timeout: s.timeout, + delete_after_use: s.delete_after_use, + restart_unless_cancelled: s.restart_unless_cancelled, + deployment_message, + concurrency_key: s.concurrency_key, + visible_to_runner_only: s.visible_to_runner_only, + no_main_func: s.no_main_func, + codebase: s.codebase, + has_preprocessor: s.has_preprocessor, + on_behalf_of_email: s.on_behalf_of_email, + assets: s.assets, + }; + + let new_hash = hash_script(&ns); + + tracing::debug!( + "cloning script at path {} from '{}' to '{}'", + s.path, + *base_hash, + new_hash + ); + + sqlx::query!(" + INSERT INTO script + (workspace_id, hash, path, parent_hashes, summary, description, content, \ + created_by, schema, is_template, extra_perms, lock, language, kind, tag, \ + draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, \ + dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, \ + delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, \ + codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets) + + SELECT workspace_id, $1, path, array_prepend($2::bigint, COALESCE(parent_hashes, '{}'::bigint[])), summary, description, \ + content, created_by, schema, is_template, extra_perms, NULL, language, kind, tag, \ + draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, \ + dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, \ + delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, \ + codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets + + FROM script WHERE hash = $2 AND workspace_id = $3; + ", new_hash, base_hash.0, w_id).execute(&mut **tx).await?; + + // Archive base. + sqlx::query!( + "UPDATE script SET archived = true WHERE hash = $1 AND workspace_id = $2", + *base_hash, + w_id + ) + .execute(&mut **tx) + .await?; + + Ok(new_hash) +} diff --git a/backend/windmill-queue/src/jobs.rs b/backend/windmill-queue/src/jobs.rs index 5cec1ac6f24be..8dd2f4d3f8a66 100644 --- a/backend/windmill-queue/src/jobs.rs +++ b/backend/windmill-queue/src/jobs.rs @@ -111,6 +111,9 @@ lazy_static::lazy_static! { .ok() .and_then(|x| x.parse().ok()) .unwrap_or(false); + + // TODO: Remove + static ref WMDEBUG_NO_DJOB_DEBOUNCING: bool = std::env::var("WMDEBUG_NO_DJOB_DEBOUNCING").is_ok(); } #[cfg(feature = "cloud")] @@ -450,6 +453,7 @@ pub async fn push_init_job<'c>( None, false, None, + None, ) .await?; inner_tx.commit().await?; @@ -505,6 +509,7 @@ pub async fn push_periodic_bash_job<'c>( None, false, None, + None, ) .await?; inner_tx.commit().await?; @@ -1401,6 +1406,7 @@ async fn restart_job_if_perpetual_inner( None, false, None, + None, ) .await?; tx.commit().await?; @@ -1889,6 +1895,7 @@ pub async fn push_error_handler<'a, 'c, T: Serialize + Send + Sync>( None, false, None, + None, ) .await?; tx.commit().await?; @@ -2180,6 +2187,12 @@ impl std::ops::Deref for PulledJob { } } +impl std::ops::DerefMut for PulledJob { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.job + } +} + lazy_static::lazy_static! 
{ pub static ref DISABLE_CONCURRENCY_LIMIT: bool = std::env::var("DISABLE_CONCURRENCY_LIMIT").is_ok_and(|s| s == "true"); } @@ -2270,10 +2283,13 @@ impl PulledJobResult { } } +/// Pull a job from the queue pub async fn pull( db: &Pool<Postgres>, + // Whether to try to pull from suspended jobs first suspend_first: bool, worker_name: &str, + // Execute queries supplied by the caller instead of the generic one query_o: Option<&(String, String)>, #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter, ) -> windmill_common::error::Result<PulledJobResult> { @@ -2292,6 +2308,7 @@ pub async fn pull( missing_concurrency_key: false, }); } + if let Some((query_suspended, query_no_suspend)) = query_o { let njob = { let job = if query_suspended.is_empty() { @@ -2313,9 +2330,13 @@ pub async fn pull( (job, false) }; - #[cfg(all(feature = "enterprise", feature = "private"))] let pulled_job_result = match job { - Some(job) if job.concurrent_limit.is_some() => { + #[cfg(feature = "private")] + Some(job) + if job.concurrent_limit.is_some() + // Concurrency limit is available for either enterprise jobs or dependency jobs + && (cfg!(feature = "enterprise") || (job.is_dependency() && !*WMDEBUG_NO_DJOB_DEBOUNCING)) => + { let job = crate::jobs_ee::apply_concurrency_limit( db, pull_loop_count, @@ -2332,10 +2353,6 @@ pub async fn pull( _ => PulledJobResult { job, suspended, missing_concurrency_key: false }, }; - #[cfg(not(all(feature = "enterprise", feature = "private")))] - let pulled_job_result = - PulledJobResult { job, suspended, missing_concurrency_key: false }; - Ok::<_, Error>(pulled_job_result) }?; @@ -2364,6 +2381,7 @@ pub async fn pull( } return Ok(njob); }; + let (job, suspended) = pull_single_job_and_mark_as_running_no_concurrency_limit( db, suspend_first, @@ -2372,7 +2390,6 @@ pub async fn pull( bench, ) .await?; - let Some(job) = job else { return Ok(PulledJobResult { job: None, suspended, missing_concurrency_key: false }); }; @@ -2380,12 +2397,14 @@ pub async fn pull( let has_concurent_limit = job.concurrent_limit.is_some(); #[cfg(not(feature = "enterprise"))] - if has_concurent_limit { + if has_concurent_limit && !job.is_dependency() { tracing::error!("Concurrent limits are an EE feature only, ignoring constraints") } #[cfg(not(feature = "enterprise"))] - let has_concurent_limit = false; + let has_concurent_limit = false + || (job.is_dependency() && cfg!(feature = "private") && !*WMDEBUG_NO_DJOB_DEBOUNCING); + // if we don't have the private flag, we don't have a concurrency limit // concurrency check. If more than X jobs for this path are already running, we re-queue and pull another job from the queue let pulled_job = job; @@ -2404,12 +2423,16 @@ pub async fn pull( }); } - #[cfg(all(feature = "enterprise", feature = "private"))] - if let Some(pulled_job) = - crate::jobs_ee::apply_concurrency_limit(db, pull_loop_count, suspended, pulled_job) - .await? + #[cfg(feature = "private")] + if cfg!(feature = "enterprise") + || (pulled_job.is_dependency() && !*WMDEBUG_NO_DJOB_DEBOUNCING) { - return Ok(pulled_job); + if let Some(pulled_job) = + crate::jobs_ee::apply_concurrency_limit(db, pull_loop_count, suspended, pulled_job) + .await?
+ { + return Ok(pulled_job); + } } } } @@ -2444,6 +2467,7 @@ async fn pull_single_job_and_mark_as_running_no_concurrency_limit<'c>( } else { None }; + if r.is_none() { // #[cfg(feature = "benchmark")] // let instant = Instant::now(); @@ -2531,6 +2555,69 @@ pub async fn concurrency_key( }) } +pub async fn custom_debounce_key( + db: &Pool<Postgres>, + job_id: &Uuid, +) -> Result<Option<String>, sqlx::Error> { + let fut = async || { + sqlx::query_scalar!("SELECT key FROM debounce_key WHERE job_id = $1", job_id) + .fetch_optional(db) // this should no longer be fetch optional + .await + }; + fut.retry( + ConstantBuilder::default() + .with_delay(std::time::Duration::from_secs(3)) + .with_max_times(5) + .build(), + ) + .notify(|err, dur| { + tracing::error!( + "Could not get debounce key for job {job_id}, retrying in {dur:#?}, err: {err:#?}" + ); + }) + .await +} + +/// Helper function to extract nodes/components to relock from job arguments /// Returns the list of nodes to relock if present in either nodes_to_relock (flows) or components_to_relock (apps) +fn extract_to_relock_from_args(args: &HashMap<String, Box<RawValue>>) -> Option<Vec<String>> { + args.get("nodes_to_relock") // For flows + .or(args.get("components_to_relock")) // For apps + .and_then(|rv| { + serde_json::from_str::<Vec<String>>(&rv.to_string()) + .map_err(|e| tracing::warn!("Failed to deserialize relock data: {}", e)) + .ok() + }) } + +/// Helper function to accumulate nodes/components to relock for a debounced job /// This merges new items with existing ones, removing duplicates +async fn accumulate_debounce_stale_data( + tx: &mut Transaction<'_, Postgres>, + job_id: &Uuid, + to_relock: &[String], +) -> Result<(), Error> { + sqlx::query!( + " + INSERT INTO debounce_stale_data (job_id, to_relock) + VALUES ($1, $2) + ON CONFLICT (job_id) + DO UPDATE SET to_relock = ( + SELECT array_agg(DISTINCT x) + FROM unnest( + -- Combine existing array with new values, removing duplicates + array_cat(debounce_stale_data.to_relock, EXCLUDED.to_relock) + ) AS x + ) + ", + job_id, + to_relock + ) + .execute(&mut **tx) + .await?; + Ok(()) } + pub fn interpolate_args(x: String, args: &PushArgs, workspace_id: &str) -> String { // Save this value to avoid parsing twice let workspaced = x.as_str().replace("$workspace", workspace_id).to_string(); @@ -3183,6 +3270,8 @@ pub async fn push<'c, 'd>( authed: Option<&Authed>, running: bool, // whether the job is already running: only set this to true if you don't want the job to be picked up by a worker from the queue. It will also set started_at to now. end_user_email: Option<String>, + // If we know there is already a debounce job, we can use this for debouncing. + debounce_job_id_o: Option<Uuid>, ) -> Result<(Uuid, Transaction<'c, Postgres>), Error> { #[cfg(feature = "cloud")] if *CLOUD_HOSTED { @@ -3383,8 +3472,8 @@ pub async fn push<'c, 'd>( raw_flow, flow_status, language, - custom_concurrency_key, - concurrent_limit, + mut custom_concurrency_key, + mut concurrent_limit, concurrency_time_window_s, cache_ttl, dedicated_worker, @@ -3555,7 +3644,7 @@ pub async fn push<'c, 'd>( ), JobPayload::Dependencies { hash, language, path, dedicated_worker } => ( Some(hash.0), - Some(path), + Some(path.clone()), None, JobKind::Dependencies, None, @@ -3568,6 +3657,8 @@ pub async fn push<'c, 'd>( dedicated_worker, None, ), + + // CLI usage; does not modify the db, so no debouncing is needed.
JobPayload::RawScriptDependencies { script_path, content, language } => ( None, Some(script_path), @@ -3583,6 +3674,8 @@ pub async fn push<'c, 'd>( None, None, ), + + // CLI usage; does not modify the db, so no debouncing is needed. JobPayload::RawFlowDependencies { path, flow_value } => ( None, Some(path), @@ -3599,9 +3692,17 @@ pub async fn push<'c, 'd>( None, ), JobPayload::FlowDependencies { path, dedicated_worker, version } => { + #[cfg(test)] + let skip_compat = args + .args + .contains_key("dbg_create_job_for_unexistant_flow_version"); + + #[cfg(not(test))] + let skip_compat = false; + // Keep inserting `value` if not all workers are updated. // Starting at `v1.440`, the value is fetched on pull from the version id. - let value_o = if !*MIN_VERSION_IS_AT_LEAST_1_440.read().await { + let value_o = if !*MIN_VERSION_IS_AT_LEAST_1_440.read().await && !skip_compat { let mut ntx = tx.into_tx().await?; // The version has been inserted only within the transaction. let data = cache::flow::fetch_version(&mut *ntx, version).await?; @@ -3613,7 +3714,7 @@ pub async fn push<'c, 'd>( }; ( Some(version), - Some(path), + Some(path.clone()), None, JobKind::FlowDependencies, value_o, @@ -3629,7 +3730,7 @@ pub async fn push<'c, 'd>( } JobPayload::AppDependencies { path, version } => ( Some(version), - Some(path.clone()), + Some(path.clone()), None, JobKind::AppDependencies, None, @@ -4088,6 +4189,19 @@ pub async fn push<'c, 'd>( ), }; + // Enforce concurrency limit on all dependency jobs. + // TODO: We can ignore this for script djobs. The main reason we need all djobs to be sequential is because we have + // nodes_to_relock and we need all locks whose corresponding steps aren't in nodes_to_relock to already be present. + // + // This is not the case for scripts, so we can potentially have multiple djobs for scripts at the same time. + if let (Some(path), true) = ( + &script_path, + cfg!(feature = "private") && job_kind.is_dependency() && !*WMDEBUG_NO_DJOB_DEBOUNCING, + ) { + custom_concurrency_key = Some(format!("dependency:{workspace_id}/{path}")); + concurrent_limit = Some(1); + } + let final_priority: Option<i16>; #[cfg(not(feature = "enterprise"))] { @@ -4218,6 +4332,184 @@ pub async fn push<'c, 'd>( Ulid::new().into() }; + // Dependency job debouncing: When multiple dependency jobs are scheduled for the same script/flow/app, + // we want to deduplicate them to avoid redundant work. The debouncing mechanism works by: + // 1. Creating a unique debounce key for each dependency target (dependency:workspace/type/path) + // 2. Reusing existing jobs when possible, or creating new ones when the existing job is already running + // 3. Accumulating the nodes/components that need relocking across all debounced requests + match ( + scheduled_for_o.is_some(), + job_kind.is_dependency(), + script_path.clone(), + *WMDEBUG_NO_DJOB_DEBOUNCING, + // We only do debouncing for jobs triggered by relative imports + // We do not want this to be the case for normal djobs, since they will always be sequential. + args.args.contains_key("triggered_by_relative_import"), + ) { + // === DEPENDENCY JOB DEBOUNCING === + // + // Debouncing consolidates multiple dependency job requests into a single execution, + // reducing redundant work when many scripts/flows/apps are updated simultaneously. + // + // Prerequisites for debouncing (all must be true): + // 1. Job is scheduled in the future (debounce_delay is not None) - provides consolidation window + // 2. Job is a dependency job + // 3. Object path is provided (script/flow/app path) + // 4.
Fallback mode is disabled (normal operation) + // 5. Job was created by relative imports (triggered by dependency chain) + // + // How it works: + // + // PHASE 1 - PUSH (in jobs.rs::push): + // When a dependency job is scheduled with delay, check debounce_key table + // - If key exists: Merge request into existing job, accumulate nodes/components + // - If key doesn't exist: Create new entry and store initial nodes/components + // + // PHASE 2 - ACCUMULATION: + // During the debounce window (typically 5-15 seconds), multiple requests merge + // - Each request adds nodes/components to debounce_stale_data table + // - SQL DISTINCT automatically removes duplicates during merge + // + // PHASE 3 - PULL (in jobs.rs::pull): + // When the delayed job finally executes: + // - Lock debounce_key FOR UPDATE to prevent races + // - Retrieve all accumulated nodes/components from debounce_stale_data + // - Process all collected dependencies in a single execution + // - Clean up both debounce_key and debounce_stale_data entries + (true, true, Some(obj_path), false, true) => { + // Generate unique debounce key: "workspace_id:object_path:dependency" + // This ensures each workspace+path combination has an independent debounce window + let debounce_key = format!("{workspace_id}:{obj_path}:dependency"); + + tracing::debug!( + workspace_id = %workspace_id, + object_path = %obj_path, + debounce_key = %debounce_key, + "Checking for existing debounced dependency job" + ); + + // Check if there's already a pending job registered for this debounce key + // The debounce_job_id_o is passed in by the caller after locking the key FOR UPDATE + // IMPORTANT: It is assumed that the caller has locked the debounce_key row in this transaction. + // We do this to block the puller from further actions until we are done with the consolidation we perform here in push. + if let Some(debounce_job_id) = debounce_job_id_o { + tracing::debug!( + existing_job_id = %debounce_job_id, + new_job_id = %job_id, + "Found existing debounced job, merging this request" + ); + + // NOTE: Race condition handling: + // In rare cases, the debounce_key entry may still exist even though the job + // has been pulled and is running. This can happen because: + // - Job pull marks job as running first + // - Then debounce_key cleanup happens (without transaction for performance) + // - Between these steps, new requests might see the old debounce_key + // + // This is acceptable because the puller will be blocked and cannot proceed until this transaction finishes. + // This will give us some space to add consolidated data (if any) and debounce the request. + // Once the tx is committed, the puller will be unblocked and continue execution.
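To make the accumulation step that follows concrete: the merge relies on `array_agg(DISTINCT x)` over the concatenation of the old and new arrays yielding the deduplicated union (the same SQL used in `accumulate_debounce_stale_data` above). A minimal, self-contained sanity check of that semantics — a sketch, not part of the PR:

use sqlx::PgPool;

// Merging {"a","b"} with {"b","c"} must yield {"a","b","c"}.
async fn demo_relock_merge(pool: &PgPool) -> Result<(), sqlx::Error> {
    let merged: Option<Vec<String>> = sqlx::query_scalar(
        "SELECT array_agg(DISTINCT x) FROM unnest(array_cat($1::text[], $2::text[])) AS x",
    )
    .bind(vec!["a".to_string(), "b".to_string()]) // already accumulated
    .bind(vec!["b".to_string(), "c".to_string()]) // newly debounced request
    .fetch_one(pool)
    .await?;
    // In practice Postgres sorts DISTINCT aggregates, so the union comes back ordered.
    assert_eq!(merged.unwrap(), ["a", "b", "c"].map(String::from));
    Ok(())
}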
+ // Accumulate the nodes/components that need relocking from this request + + // This ensures all dependency updates are handled even if jobs are debounced + if let Some(to_relock) = extract_to_relock_from_args(&args.args) { + tracing::debug!( + job_id = %debounce_job_id, + node_count = to_relock.len(), + nodes = ?to_relock, + "Accumulating nodes/components to existing debounced job" + ); + + accumulate_debounce_stale_data(&mut tx, &debounce_job_id, &to_relock) + .await + .map_err(|e| { + tracing::error!( + error = %e, + job_id = %debounce_job_id, + debounce_key = %debounce_key, + "Failed to accumulate stale data for debounced job" + ); + e + })?; + } else { + tracing::trace!( + job_id = %debounce_job_id, + "No nodes to relock in this request, skipping accumulation" + ); + } + + // Return the existing job ID, effectively debouncing this request + // The new job_id we generated won't be used + tracing::debug!( + returned_job_id = %debounce_job_id, + skipped_job_id = %job_id, + "Debounced: returning existing job ID instead of creating new job" + ); + + // We will skip some of the work downstream and just debounce the job. + return Ok((debounce_job_id, tx)); + } else { + // No existing debounce entry - this is the first request in the debounce window + tracing::debug!( + job_id = %job_id, + debounce_key = %debounce_key, + "Creating new debounce entry (first request in window)" + ); + + sqlx::query!( + "INSERT INTO debounce_key (key, job_id) VALUES ($1, $2)", + &debounce_key, + job_id, + ) + .execute(&mut *tx) + .await + .map_err(|e| { + tracing::error!( + error = %e, + debounce_key = %debounce_key, + job_id = %job_id, + "Failed to insert debounce_key entry" + ); + Error::InternalErr(format!("Failed to create debounce entry: {}", e)) + })?; + + // Store initial nodes/components to relock if provided + if let Some(to_relock) = extract_to_relock_from_args(&args.args) { + tracing::debug!( + job_id = %job_id, + node_count = to_relock.len(), + nodes = ?to_relock, + "Storing initial nodes/components for new debounced job" + ); + + accumulate_debounce_stale_data(&mut tx, &job_id, &to_relock) + .await + .map_err(|e| { + tracing::error!( + error = %e, + job_id = %job_id, + "Failed to store initial stale data for debounced job" + ); + e + })?; + } else { + tracing::trace!( + job_id = %job_id, + "No initial nodes to relock, debounce entry created without stale data" + ); + } + } + } + _ => { + // Debouncing not applicable - proceed with normal job creation + tracing::trace!( + job_id = %job_id, + job_kind = ?job_kind, + "Debouncing conditions not met, proceeding with normal job creation" + ); + } + }; + if concurrent_limit.is_some() { insert_concurrency_key( workspace_id, @@ -4230,7 +4522,6 @@ pub async fn push<'c, 'd>( ) .await?; } - let stringified_args = if *JOB_ARGS_AUDIT_LOGS { Some(serde_json::to_string(&args).map_err(|e| { Error::internal_err(format!( @@ -4520,6 +4811,39 @@ pub async fn insert_concurrency_key<'d, 'c>( Ok(()) } +// pub async fn insert_debounce_key<'d, 'c>( +// workspace_id: &str, +// args: &PushArgs<'d>, +// script_path: &Option, +// job_kind: JobKind, +// custom_concurrency_key: Option, +// tx: &mut Transaction<'c, Postgres>, +// job_id: Uuid, +// ) -> Result<(), Error> { +// let concurrency_key = custom_concurrency_key +// .map(|x| interpolate_args(x, args, workspace_id)) +// .unwrap_or(fullpath_with_workspace( +// workspace_id, +// script_path.as_ref(), +// &job_kind, +// )); +// sqlx::query!( +// "WITH inserted_concurrency_counter AS ( +// INSERT INTO concurrency_counter 
(concurrency_id, job_uuids) + // VALUES ($1, '{}'::jsonb) + // ON CONFLICT DO NOTHING + // ) + // INSERT INTO concurrency_key(key, job_id) VALUES ($1, $2)", + // concurrency_key, + // job_id, + // ) + // .execute(&mut **tx) + // .warn_after_seconds(3) + // .await + // .map_err(|e| Error::internal_err(format!("Could not insert concurrency_key={concurrency_key} for job_id={job_id} script_path={script_path:?} workspace_id={workspace_id}: {e:#}")))?; + // Ok(()) + // } + pub fn canceled_job_to_result(job: &MiniPulledJob) -> serde_json::Value { let reason = job .canceled_reason @@ -4809,3 +5133,232 @@ pub async fn get_same_worker_job( )) }) } + +pub async fn preprocess_dependency_job(job: &mut PulledJob, db: &DB) -> error::Result<()> { + let kind = job.kind; + // Handle dependency job debouncing cleanup when a job is pulled for execution + if kind.is_dependency() && !*WMDEBUG_NO_DJOB_DEBOUNCING { + // Only used for testing in tests/relative_imports.rs + // Give us some space to work with. + #[cfg(debug_assertions)] + if let Some(duration) = job + .args + .as_ref() + .map(|x| { + x.get("dbg_sleep_between_pull_and_debounce_key_removal") + .map(|v| serde_json::from_str::<i64>(v.get()).ok()) + .flatten() + }) + .flatten() + { + tracing::debug!("going to sleep"); + sleep(std::time::Duration::from_secs(duration as u64)).await; + } + + tracing::debug!( + "Processing debounce cleanup for dependency job {} at path {:?}", + &job.id, + &job.runnable_path + ); + + let key = format!("{}:{}:dependency", &job.workspace_id, job.runnable_path()); + let mut tx = db.begin().await?; + + // === DEBOUNCE CLEANUP === + // + // Clean up the debounce_key entry for this job (if it exists). + // + // IMPORTANT: We delete by key (not job_id) to avoid race conditions: + // If the pusher has locked this row, this call will block until all txs are committed. + // + // The idea is that worker_lockfiles::trigger_dependents_to_recompute_dependencies will fetch the latest version of the object. + // This object needs to be created before the djob is executed, and that happens right here. + // + // This way the next pusher can fetch the latest version of the object and base their djob payload on the newest version. + // The concurrency limit on djobs will make sure that by the time the next djob starts executing, the base version it references + // has already calculated all locks. This way even the next djob will always use the fully finalized version of the object. + // + // Note: This runs during job pull, so the transaction is kept as short as possible. + // There's still a tiny window where the job is already running but the key isn't deleted yet, + // which is acceptable because new requests will just accumulate data to this job. + tracing::debug!( + job_id = %job.id, + "Cleaning up debounce_key entry for completed/pulled job" + ); + + // This will either: + // 1. Block until the pusher has pushed, which gives us: + // - if there was any stale data from the pusher, we will read it here (a couple of lines below); + // 2. Block the pusher until we are done here, which gives us: + // - we will clone the objects and retrieve the latest version, so when we are done the pusher can read the latest version.
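Condensed, the puller-side critical section around the DELETE that follows looks roughly like this sketch (assuming the `debounce_key` schema used in this PR; it mirrors, but is not, the code below):

use sqlx::PgPool;
use uuid::Uuid;

// Pull-side handshake: delete the key inside a short transaction so the
// row lock serializes us against concurrent pushers for the same path.
async fn debounce_cleanup(db: &PgPool, w_id: &str, path: &str, job_id: Uuid) -> Result<(), sqlx::Error> {
    let key = format!("{w_id}:{path}:dependency");
    let mut tx = db.begin().await?;
    // Blocks here if a pusher holds the row FOR UPDATE; once it commits,
    // any stale data it wrote is visible to the reads that follow.
    sqlx::query("DELETE FROM debounce_key WHERE key = $1")
        .bind(&key)
        .execute(&mut *tx)
        .await?;
    // ... clone the script / copy the flow or app version, then drain
    // debounce_stale_data for `job_id` (as done in the code below) ...
    let _ = job_id;
    tx.commit().await?; // unblocks pushers; they now see the finalized latest version
    Ok(())
}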
+ sqlx::query!("DELETE FROM debounce_key WHERE key = $1", &key) + .execute(&mut *tx) + .await + .map_err(|e| { + tracing::error!( + error = %e, + job_id = %job.id, + "Failed to delete debounce_key" + ); + e + })?; + + if job + .args + .as_ref() + .map(|x| x.get("triggered_by_relative_import").is_some()) + .unwrap_or_default() + { + let Some(base_hash) = job.runnable_id else { + return Err(Error::InternalErr( + "Missing runnable_id for dependency job triggered by relative import" + .to_string(), + )); + }; + + tracing::debug!( + job_id = %job.id, + base_hash = %base_hash, + job_kind = ?kind, + "Creating new version for dependency job triggered by relative import" + ); + + let new_id = match kind { + JobKind::Dependencies => { + let deployment_message = job + .args + .clone() + .map(|hashmap| { + hashmap + .get("deployment_message") + .map(|map_value| { + serde_json::from_str::(map_value.get()).ok() + }) + .flatten() + }) + .flatten(); + + // This way we tell downstream which script we should archive when the resolution is finished. + // (not used at the moment) + job.args + .as_mut() + .map(|args| args.insert("base_hash".to_owned(), to_raw_value(&*base_hash))); + + let new_hash = windmill_common::scripts::clone_script( + base_hash, + &job.workspace_id, + deployment_message, + &mut tx, + ) + .await?; + + new_hash + } + JobKind::FlowDependencies => { + sqlx::query_scalar!( + "INSERT INTO flow_version + (workspace_id, path, value, schema, created_by) + + SELECT workspace_id, path, value, schema, created_by + FROM flow_version WHERE path = $1 AND workspace_id = $2 AND id = $3 + + RETURNING id + ", + job.runnable_path(), + job.workspace_id, + *base_hash, + ) + .fetch_one(&mut *tx) + .await? + } + JobKind::AppDependencies => { + sqlx::query_scalar!( + "INSERT INTO app_version + (app_id, value, created_by, raw_app) + SELECT app_id, value, created_by, raw_app + FROM app_version WHERE id = $1 + RETURNING id", + *base_hash + ) + .fetch_one(&mut *tx) + .await? + } + _ => { + return Err(Error::InternalErr(format!( + "Matched unexpected JobKind ({:?}). This is a bug!", + kind + ))) + } + }; + + job.runnable_id.replace(new_id.into()); + } + + // === RETRIEVE ACCUMULATED DEBOUNCE DATA === + // + // For flows and apps, retrieve all nodes/components that were accumulated + // during the debounce window. This data comes from requests that were merged + // into this job instead of creating their own jobs. + // + // Scripts don't need this because they don't have nodes/components to relock. + if let Some(to_relock_field) = match &job.kind { + JobKind::FlowDependencies => Some("nodes_to_relock"), + JobKind::AppDependencies => Some("components_to_relock"), + _ => None, // Scripts don't use accumulated stale data + } { + tracing::debug!( + job_id = %job.id, + job_kind = ?job.kind, + field = %to_relock_field, + "Retrieving accumulated stale data from debounced requests" + ); + + if let Some(stale_data) = sqlx::query_scalar!( + "DELETE FROM debounce_stale_data WHERE job_id = $1 RETURNING to_relock", + &job.id + ) + .fetch_optional(&mut *tx) + .await + .map_err(|e| { + tracing::error!( + error = %e, + job_id = %job.id, + "Failed to retrieve debounce_stale_data" + ); + e + })? 
+ .flatten() + { + tracing::debug!( + job_id = %job.id, + node_count = stale_data.len(), + nodes = ?stale_data, + "Retrieved {} accumulated nodes/components from debounced requests", + stale_data.len() + ); + + // Replace the job's relock list with the accumulated data + // This ensures all nodes from all debounced requests are processed + if let Some(args) = job.args.as_mut() { + args.insert(to_relock_field.to_owned(), to_raw_value(&stale_data)); + tracing::debug!( + field = %to_relock_field, + "Updated job args with accumulated debounce data" + ); + } + } else { + tracing::trace!( + job_id = %job.id, + "No accumulated stale data found (no debounced requests or already cleaned up)" + ); + } + } + + // This will unblock the pusher. + tx.commit().await?; + } + + Ok(()) +} diff --git a/backend/windmill-queue/src/schedule.rs b/backend/windmill-queue/src/schedule.rs index c35a94a2dc4d3..1aa4a11a10a30 100644 --- a/backend/windmill-queue/src/schedule.rs +++ b/backend/windmill-queue/src/schedule.rs @@ -23,8 +23,8 @@ use windmill_common::jobs::check_tag_available_for_workspace_internal; use windmill_common::jobs::JobPayload; use windmill_common::schedule::schedule_to_user; use windmill_common::scripts::ScriptHash; -use windmill_common::worker::to_raw_value; use windmill_common::utils::WarnAfterExt; +use windmill_common::worker::to_raw_value; use windmill_common::FlowVersionInfo; use windmill_common::DB; use windmill_common::{ @@ -39,13 +39,13 @@ async fn get_schedule_metadata<'c>( tx: &mut sqlx::Transaction<'c, sqlx::Postgres>, schedule: &Schedule, ) -> Result<( - Option<String>, // tag - Option<i32>, // timeout - Option<String>, // on_behalf_of_email - String, // created_by - Option<ScriptHash>, // hash (for scripts) - Option<i64>, // flow_version (for flows) - Option<Retry>, // retry + Option<String>, // tag + Option<i32>, // timeout + Option<String>, // on_behalf_of_email + String, // created_by + Option<ScriptHash>, // hash (for scripts) + Option<i64>, // flow_version (for flows) + Option<Retry>, // retry )> { let parsed_retry = schedule .retry @@ -62,20 +62,24 @@ async fn get_schedule_metadata<'c>( ) .await?; - let FlowVersionInfo { + let FlowVersionInfo { tag, on_behalf_of_email, edited_by, .. } = + get_latest_flow_version_info_for_path_from_version( + &mut **tx, + version, + &schedule.workspace_id, + &schedule.script_path, + ) + .await?; + + Ok(( tag, + None, on_behalf_of_email, edited_by, - ..
- } = get_latest_flow_version_info_for_path_from_version( - &mut **tx, - version, - &schedule.workspace_id, - &schedule.script_path, - ) - .await?; - - Ok((tag, None, on_behalf_of_email, edited_by, None, Some(version), parsed_retry)) + None, + Some(version), + parsed_retry, + )) } else { let ( hash, @@ -98,7 +102,15 @@ async fn get_schedule_metadata<'c>( ) .await?; - Ok((tag, timeout, on_behalf_of_email, created_by, Some(hash), None, parsed_retry)) + Ok(( + tag, + timeout, + on_behalf_of_email, + created_by, + Some(hash), + None, + parsed_retry, + )) } } @@ -208,7 +220,9 @@ pub async fn push_scheduled_job<'c>( // If schedule handler is defined, wrap the scheduled job in a synthetic flow // with the handler as the first step (with stop_after_if to skip if handler returns false) - let (payload, tag, timeout, on_behalf_of_email, created_by) = if let Some(handler_path) = &schedule.dynamic_skip { + let (payload, tag, timeout, on_behalf_of_email, created_by) = if let Some(handler_path) = + &schedule.dynamic_skip + { // Build skip handler args let mut skip_handler_args = HashMap::<String, Box<RawValue>>::new(); skip_handler_args.insert( @@ -472,6 +486,7 @@ pub async fn push_scheduled_job<'c>( push_authed, false, None, + None, ) .warn_after_seconds_with_sql(1, "push in push_scheduled_job".to_string()) .await?; diff --git a/backend/windmill-worker/src/ai_executor.rs b/backend/windmill-worker/src/ai_executor.rs index 01a4b36b71cb3..bbd40979fe50a 100644 --- a/backend/windmill-worker/src/ai_executor.rs +++ b/backend/windmill-worker/src/ai_executor.rs @@ -969,6 +969,7 @@ pub async fn run_agent( job_perms.as_ref(), true, None, + None, ) .await?; diff --git a/backend/windmill-worker/src/worker.rs b/backend/windmill-worker/src/worker.rs index 893cb9ef15aff..39f1944d6dbd0 100644 --- a/backend/windmill-worker/src/worker.rs +++ b/backend/windmill-worker/src/worker.rs @@ -11,6 +11,7 @@ use anyhow::anyhow; use futures::TryFutureExt; +use tokio::time::sleep; use tokio::time::timeout; use windmill_common::client::AuthedClient; use windmill_common::scripts::hash_to_codebase_id; @@ -57,6 +58,7 @@ use std::{ time::Duration, }; use windmill_parser::MainArgSignature; +use windmill_queue::preprocess_dependency_job; use windmill_queue::PulledJobResultToJobErr; use uuid::Uuid; @@ -954,6 +956,7 @@ pub async fn run_worker( ); } + dbg!("start"); let start_time = Instant::now(); let worker_dir = format!("{TMP_DIR}/{worker_name}"); @@ -994,6 +997,8 @@ pub async fn run_worker( }); } + dbg!("python stuff is done"); + if let Some(ref netrc) = *NETRC { tracing::info!(worker = %worker_name, hostname = %hostname, "Writing netrc at {}/.netrc", HOME_ENV.as_str()); write_file(&HOME_ENV, ".netrc", netrc).expect("could not write netrc"); @@ -1001,6 +1006,8 @@ pub async fn run_worker( create_directory_async(&worker_dir).await; + dbg!("worker dir created"); + if !*DISABLE_NSJAIL { let _ = write_file( &worker_dir, @@ -1383,6 +1390,7 @@ pub async fn run_worker( let mut killpill_rx2 = killpill_rx.resubscribe(); + dbg!("starting loop"); loop { let last_processing_duration_secs = last_processing_duration.load(Ordering::SeqCst); if last_processing_duration_secs > 5 { @@ -1503,7 +1511,6 @@ pub async fn run_worker( match &conn { Connection::Sql(db) => { let job = get_same_worker_job(db, &same_worker_job).await; - // tracing::error!("r: {:?}", r); if job.is_err() && !same_worker_job.recoverable { tracing::error!( worker = %worker_name, hostname = %hostname, @@ -1559,6 +1566,7 @@ pub async fn run_worker( Connection::Sql(db) => { let pull_time = Instant::now(); let
likelihood_of_suspend = last_30jobs_suspended as f64 / 30.0; + let suspend_first = suspend_first_success || rand::random::<f64>() < likelihood_of_suspend || last_suspend_first.elapsed().as_secs_f64() > 5.0; @@ -1566,8 +1574,7 @@ if suspend_first { last_suspend_first = Instant::now(); } - - let job = match timeout( + let mut job = match timeout( Duration::from_secs(10), pull( &db, @@ -1589,6 +1596,31 @@ } }; + // Essential debouncing job preprocessing. + if let Ok(windmill_queue::PulledJobResult { + job: Some(ref mut pulled_job), + .. + }) = &mut job + { + match timeout( + core::time::Duration::from_secs(10), + preprocess_dependency_job(pulled_job, &db), + ) + .warn_after_seconds(2) + .await + { + Ok(Err(e)) => { + tracing::error!(worker = %worker_name, hostname = %hostname, "critical: debouncing job preprocessor failed: {e:?}"); + job = Err(e.into()); + } + Err(e) => { + tracing::error!(worker = %worker_name, hostname = %hostname, "critical: debouncing job preprocessor has timed out: {e:?}"); + job = Err(e.into()); + } + _ => {} + } + } + add_time!(bench, "job pulled from DB"); let duration_pull_s = pull_time.elapsed().as_secs_f64(); let err_pull = job.is_ok(); @@ -1658,6 +1690,7 @@ Err(err) => Err(err), } } + Connection::Http(client) => crate::agent_workers::pull_job(&client, None, None) .await .map_err(|e| error::Error::InternalErr(e.to_string())) @@ -2527,6 +2560,23 @@ pub async fn handle_queued_job( logs.push_str("---\n"); } + // Only used for testing in tests/relative_imports.rs + // Give us some space to work with. + #[cfg(debug_assertions)] + if let Some(dbg_djob_sleep) = job + .args + .as_ref() + .map(|x| { + x.get("dbg_djob_sleep") + .map(|v| serde_json::from_str::<i64>(v.get()).ok()) + .flatten() + }) + .flatten() + { + tracing::debug!("Debug: {} going to sleep for {}", job.id, dbg_djob_sleep); + sleep(std::time::Duration::from_secs(dbg_djob_sleep as u64)).await; + } + tracing::debug!( workspace_id = %job.workspace_id, "handling job {}", @@ -2564,7 +2614,7 @@ JobKind::FlowDependencies => match conn { Connection::Sql(db) => { handle_flow_dependency_job( - &job, + (*job).clone(), preview_data.as_ref(), &mut mem_peak, &mut canceled_by, @@ -2586,7 +2636,7 @@ }, JobKind::AppDependencies => match conn { Connection::Sql(db) => handle_app_dependency_job( - &job, + (*job).clone(), &mut mem_peak, &mut canceled_by, job_dir, diff --git a/backend/windmill-worker/src/worker_flow.rs b/backend/windmill-worker/src/worker_flow.rs index ad5b40ee4a5cb..7a75068fcf346 100644 --- a/backend/windmill-worker/src/worker_flow.rs +++ b/backend/windmill-worker/src/worker_flow.rs @@ -3160,6 +3160,7 @@ async fn push_next_flow_job( job_perms.as_ref(), false, None, + None, ) .warn_after_seconds(2) .await?; diff --git a/backend/windmill-worker/src/worker_lockfiles.rs b/backend/windmill-worker/src/worker_lockfiles.rs index 59f0f87552e95..f4fbf5bb92029 100644 --- a/backend/windmill-worker/src/worker_lockfiles.rs +++ b/backend/windmill-worker/src/worker_lockfiles.rs @@ -5,21 +5,23 @@ use std::path::{Component, Path, PathBuf}; #[cfg(feature = "python")] use crate::ansible_executor::{get_git_repos_lock, AnsibleDependencyLocks}; -use crate::scoped_dependency_map::{ScopedDependencyMap, WMDEBUG_NO_DMAP_DISSOLVE}; +use crate::scoped_dependency_map::ScopedDependencyMap; use async_recursion::async_recursion; +use chrono::{Duration, Utc}; use itertools::Itertools; use serde_json::value::RawValue;
use serde_json::{from_value, json, Value}; use sha2::Digest; use sqlx::types::Json; +use tokio::time::timeout; use uuid::Uuid; use windmill_common::assets::{clear_asset_usage, insert_asset_usage, AssetUsageKind}; use windmill_common::error::Error; use windmill_common::error::Result; use windmill_common::flows::{FlowModule, FlowModuleValue, FlowNodeId}; -use windmill_common::get_latest_deployed_hash_for_path; use windmill_common::jobs::JobPayload; -use windmill_common::scripts::{hash_script, NewScript, ScriptHash}; +use windmill_common::scripts::ScriptHash; +use windmill_common::utils::WarnAfterExt; #[cfg(feature = "python")] use windmill_common::worker::PythonAnnotations; use windmill_common::worker::{to_raw_value, to_raw_value_owned, write_file, Connection}; @@ -46,6 +48,9 @@ lazy_static::lazy_static! { static ref WMDEBUG_NO_NEW_FLOW_VERSION_ON_DJ: bool = std::env::var("WMDEBUG_NO_NEW_FLOW_VERSION_ON_DJ").is_ok(); static ref WMDEBUG_NO_NEW_APP_VERSION_ON_DJ: bool = std::env::var("WMDEBUG_NO_NEW_APP_VERSION_ON_DJ").is_ok(); static ref WMDEBUG_NO_COMPONENTS_TO_RELOCK: bool = std::env::var("WMDEBUG_NO_COMPONENTS_TO_RELOCK").is_ok(); + static ref DEPENDENCY_JOB_DEBOUNCE_DELAY: usize = std::env::var("DEPENDENCY_JOB_DEBOUNCE_DELAY").ok().and_then(|flag| flag.parse().ok()).unwrap_or( + if cfg!(test) { /* in tests we want an increased debounce delay */ 15 } else { 5 } + ); } use crate::common::OccupancyMetrics; @@ -144,6 +149,12 @@ pub async fn handle_dependency_job( token: &str, occupancy_metrics: &mut OccupancyMetrics, ) -> error::Result<Box<RawValue>> { + // Processing a dependency job - these jobs handle lockfile generation and dependency updates + // for scripts, flows, and apps when their dependencies or imported scripts change + tracing::debug!( + "Processing dependency job for path: {:?}", + job.runnable_path() + ); let script_path = job.runnable_path(); let raw_deps = job .args @@ -243,141 +254,25 @@ let current_hash = job.runnable_id.unwrap_or(ScriptHash(0)); let w_id = &job.workspace_id; + let (deployment_message, parent_path) = get_deployment_msg_and_parent_path_from_args(job.args.clone()); - let script_info = sqlx::query_as::<_, windmill_common::scripts::Script>( - "SELECT * FROM script WHERE hash = $1 AND workspace_id = $2", + // We do not create a new row for this update + // That means we can keep the current hash and just update the lock + sqlx::query!( + "UPDATE script SET lock = $1 WHERE hash = $2 AND workspace_id = $3", + &content, + &current_hash.0, + w_id ) - .bind(&current_hash.0) - .bind(w_id) - .fetch_one(db) + .execute(db) .await?; - // DependencyJob can be triggered only from 2 places: - // 1. create_script function in windmill-api/src/scripts.rs - // 2. trigger_dependents_to_recompute_dependencies (in this file) - // - // First will **always** produce script with null in `lock` - // where Second will **always** do with lock being not null - let deployed_hash = if script_info.lock.is_some() && !*WMDEBUG_NO_HASH_CHANGE_ON_DJ { - let path = script_info.path.clone(); - - let mut tx = db.begin().await?; - // This entire section exists to solve following problem: - // - // 2 workers, one script that depend on another in python - // run the original script on both workers - // you update the dependenecy of a relative import, - // run it again until you ran it on both, normally it should fail on one of those - // - // It happens because every worker has cached their own script versions.
- // However usual dependency job does not update hash of the script (and cache is keyed by the hash). - // This logical branch will create new script which will update the hash and automatically invalidate cache. - // - // IMPORTANT: This will **only** be triggered by another DependencyJob. It will never be triggered by script (re)deployement - - let ns = NewScript { - path: script_info.path, - parent_hash: Some(current_hash), - summary: script_info.summary, - description: script_info.description, - content: script_info.content, - schema: script_info.schema, - is_template: Some(script_info.is_template), - // TODO: Make it either None everywhere (particularely when raw reqs are calculated) - // Or handle this case and conditionally make Some (only with raw reqs) - lock: None, - language: script_info.language, - kind: Some(script_info.kind), - tag: script_info.tag, - draft_only: script_info.draft_only, - envs: script_info.envs, - concurrent_limit: script_info.concurrent_limit, - concurrency_time_window_s: script_info.concurrency_time_window_s, - cache_ttl: script_info.cache_ttl, - dedicated_worker: script_info.dedicated_worker, - ws_error_handler_muted: script_info.ws_error_handler_muted, - priority: script_info.priority, - timeout: script_info.timeout, - delete_after_use: script_info.delete_after_use, - restart_unless_cancelled: script_info.restart_unless_cancelled, - deployment_message: deployment_message.clone(), - concurrency_key: script_info.concurrency_key, - visible_to_runner_only: script_info.visible_to_runner_only, - no_main_func: script_info.no_main_func, - codebase: script_info.codebase, - has_preprocessor: script_info.has_preprocessor, - on_behalf_of_email: script_info.on_behalf_of_email, - assets: script_info.assets, - }; - - let new_hash = hash_script(&ns); - - sqlx::query!(" - INSERT INTO script - (workspace_id, hash, path, parent_hashes, summary, description, content, \ - created_by, schema, is_template, extra_perms, lock, language, kind, tag, \ - draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, \ - dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, \ - delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, \ - codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets) - - SELECT workspace_id, $1, path, array_prepend($2::bigint, COALESCE(parent_hashes, '{}'::bigint[])), summary, description, \ - content, created_by, schema, is_template, extra_perms, $4, language, kind, tag, \ - draft_only, envs, concurrent_limit, concurrency_time_window_s, cache_ttl, \ - dedicated_worker, ws_error_handler_muted, priority, restart_unless_cancelled, \ - delete_after_use, timeout, concurrency_key, visible_to_runner_only, no_main_func, \ - codebase, has_preprocessor, on_behalf_of_email, schema_validation, assets - - FROM script WHERE hash = $2 AND workspace_id = $3; - ", - new_hash, current_hash.0, w_id, &content).execute(db).await?; - tracing::info!( - "Updated script at path {} with hash {} to new hash {}", - path, - current_hash.0, - new_hash - ); - // Archive current - sqlx::query!( - "UPDATE script SET archived = true WHERE hash = $1 AND workspace_id = $2", - current_hash.0, - w_id - ) - .execute(&mut *tx) - .await?; - tracing::info!( - "Archived script at path {} from dependency job {}", - path, - current_hash.0 - ); - tx.commit().await?; - - ScriptHash(new_hash) - } else { - // We do not create new row for this update - // That means we can keep current hash and just update lock - sqlx::query!( 
- "UPDATE script SET lock = $1 WHERE hash = $2 AND workspace_id = $3", - &content, - ¤t_hash.0, - w_id - ) - .execute(db) - .await?; - - // `lock` has been updated; invalidate the cache. - // Since only worker that ran this Dependency Job has the cache - // we do not need to think about invalidating cache for other workers. - cache::script::invalidate(current_hash); - - if *WMDEBUG_NO_HASH_CHANGE_ON_DJ { - tracing::warn!("WMDEBUG_NO_HASH_CHANGE_ON_DJ usually should not be used. Behavior might be unstable. Please contact Windmill Team for support.") - } - - current_hash - }; + // `lock` has been updated; invalidate the cache. + // Since only worker that ran this Dependency Job has the cache + // we do not need to think about invalidating cache for other workers. + cache::script::invalidate(current_hash); if let Err(e) = handle_deployment_metadata( &job.permissioned_as_email, @@ -385,7 +280,7 @@ pub async fn handle_dependency_job( &db, &w_id, DeployedObject::Script { - hash: deployed_hash, + hash: current_hash, path: script_path.to_string(), parent_path: parent_path.clone(), }, @@ -520,20 +415,30 @@ pub async fn process_relative_imports( // But currently we will do this extra db call for every script regardless of whether they have relative imports or not // Script might have no relative imports but still be referenced by someone else. - if let Err(e) = trigger_dependents_to_recompute_dependencies( - w_id, - script_path, - deployment_message, - parent_path, - permissioned_as_email, - created_by, - permissioned_as, - db, - already_visited, + match timeout( + core::time::Duration::from_secs(60), + trigger_dependents_to_recompute_dependencies( + w_id, + script_path, + deployment_message, + parent_path, + permissioned_as_email, + created_by, + permissioned_as, + db, + already_visited, + ), ) + .warn_after_seconds(10) .await { - tracing::error!(%e, "error triggering dependents to recompute dependencies"); + Ok(Err(e)) => { + tracing::error!(%e, "error triggering dependents to recompute dependencies") + } + Err(e) => { + tracing::error!(%e, "triggering dependents to recompute dependencies has timed out") + } + _ => {} } } @@ -551,6 +456,15 @@ pub async fn trigger_dependents_to_recompute_dependencies( db: &sqlx::Pool, mut already_visited: Vec, ) -> error::Result<()> { + // TODO: There is a race-condition. + // This can be old version. + // + // Check lines of code below, you will find that we get the latest version of the script/app/flow + // + // However the latest version does not necessarily mean that it is finalized. + // Instead we assume that this would be the version we would base on. + // + // So the script_importers might be behind. Thus some information like nodes_to_relock might be lost. 
let script_importers = sqlx::query!( "SELECT importer_path, importer_kind::text, array_agg(importer_node_id) as importer_node_ids FROM dependency_map WHERE imported_path = $1 AND workspace_id = $2 GROUP BY importer_path, importer_kind", script_path, w_id, ) .fetch_all(db) .await?; + tracing::debug!( + "Triggering dependents to recompute dependencies for: {}", + &script_path + ); + already_visited.push(script_path.to_string()); for s in script_importers.iter() { + tracing::trace!("Processing dependency: {:?}", &s); if already_visited.contains(&s.importer_path) { + tracing::trace!("Skipping already visited dependency"); continue; } - let tx = PushIsolationLevel::IsolatedRoot(db.clone()); + let mut tx = db.clone().begin().await?; let mut args: HashMap<String, Box<RawValue>> = HashMap::new(); if let Some(ref dm) = deployment_message { args.insert("deployment_message".to_string(), to_raw_value(&dm)); @@ -586,137 +507,104 @@ pub async fn trigger_dependents_to_recompute_dependencies( to_raw_value(&already_visited), ); + args.insert( + "triggered_by_relative_import".to_string(), + to_raw_value(&()), + ); + + // Lock the debounce_key entry FOR UPDATE to coordinate with the push side. + // This prevents concurrent modifications during dependency job scheduling. + // + // The lock serves three purposes: + // 1. Ensures we get the current debounce_job_id atomically + // 2. Blocks new push requests from modifying this key until we commit + // 3. Blocks the puller from actually starting the job, giving us a chance to still squeeze the debounce in. + // + // After our transaction commits, any pending push/pull requests can proceed with + // their debounce logic. + let debounce_job_id_o = + windmill_common::jobs::lock_debounce_key(w_id, &s.importer_path, &mut tx).await?; + + tracing::debug!( + debounce_job_id = ?debounce_job_id_o, + importer_path = %s.importer_path, + "Retrieved debounce job ID (if exists)" + ); + let kind = s.importer_kind.clone().unwrap_or_default(); let job_payload = if kind == "script" { - let r = - // TODO: Not sure if this is safe: - // might have race conditions in edge-cases - get_latest_deployed_hash_for_path(None, db.clone(), w_id, s.importer_path.as_str()) - .await; - match r { - // We will create Dependency job as is. But the Dep Job Handler will detect that the job originates - // from [[trigger_dependents_to_recompute_dependencies]] and will create new script with new hash instead - Ok(r) => JobPayload::Dependencies { - path: s.importer_path.clone(), - hash: ScriptHash(r.hash), - language: r.language, - dedicated_worker: r.dedicated_worker, - }, - Err(err) => { - tracing::error!( - "error getting latest deployed hash for path {path}: {err}", - path = s.importer_path, - err = err - ); + match sqlx::query_scalar!( + "SELECT hash FROM script WHERE path = $1 AND workspace_id = $2 AND deleted = false ORDER BY created_at DESC LIMIT 1", + s.importer_path.clone(), + w_id + ) + .fetch_optional(&mut *tx) + .await?
+ { + Some(hash) => { + tracing::debug!("newest hash for {} is: {hash}", &s.importer_path); + + let info = + windmill_common::get_script_info_for_hash(None, db, w_id, hash).await?; + + JobPayload::Dependencies { + path: s.importer_path.clone(), + hash: ScriptHash(hash), + language: info.language, + dedicated_worker: info.dedicated_worker, + } + } + None => { + ScopedDependencyMap::clear_map_for_item( + &s.importer_path, + w_id, + "script", + tx, + &None, + ) + .await + .commit() + .await?; continue; } } } else if kind == "flow" { - // Unlike 'script', 'flow' will not delegate redeployment of new flow to the Dep Job Handler. - // We will create new flow in-place. - // It would be harder to do otherwise. + tracing::debug!("Handling flow dependency update for: {}", s.importer_path); - // Create transaction to make operation atomic. - let mut flow_tx = db.begin().await?; args.insert( "nodes_to_relock".to_string(), to_raw_value(&s.importer_node_ids), ); - let r = sqlx::query_scalar!( - "SELECT versions[array_upper(versions, 1)] FROM flow WHERE path = $1 AND workspace_id = $2", - s.importer_path, - w_id, - ).fetch_optional(&mut *flow_tx) - .await - .map_err(to_anyhow).map(Option::flatten); - - match r { - // TODO: Fallback - remove eventually. - Ok(Some(version)) if *WMDEBUG_NO_NEW_FLOW_VERSION_ON_DJ => { - tracing::warn!("WMDEBUG_NO_NEW_FLOW_VERSION_ON_DJ usually should not be used. Behavior might be unstable. Please contact Windmill Team for support."); - JobPayload::FlowDependencies { - path: s.importer_path.clone(), - dedicated_worker: None, - version, - } - } - // Get current version of current flow. - Ok(Some(cur_version)) => { - // NOTE: Temporary solution. See the usage for more details. - args.insert( - "triggered_by_relative_import".to_string(), - to_raw_value(&()), - ); - // Find out what would be the next version. - // Also clone current flow_version to get new_version (which is usually c_v + 1). - // NOTE: It is fine if something goes wrong downstream and `flow` is not being appended with this new version. - // This version will just remain in db and cause no trouble. - let new_version = sqlx::query_scalar!( - "INSERT INTO flow_version - (workspace_id, path, value, schema, created_by) - - SELECT workspace_id, path, value, schema, created_by - FROM flow_version WHERE path = $1 AND workspace_id = $2 AND id = $3 - - RETURNING id", + match sqlx::query_scalar!( + "SELECT id FROM flow_version WHERE path = $1 AND workspace_id = $2 ORDER BY created_at DESC LIMIT 1", + s.importer_path.clone(), + w_id + ) + .fetch_optional(&mut *tx) + .await? + { + Some(version) => JobPayload::FlowDependencies { + path: s.importer_path.clone(), + version, + dedicated_worker: None, + }, + None => { + ScopedDependencyMap::clear_map_for_item( &s.importer_path, w_id, - cur_version + "flow", + tx, + &None, ) - .fetch_one(&mut *flow_tx) .await - .map_err(|e| { - error::Error::internal_err(format!( - "Error updating flow due to flow history insert: {e:#}" - )) - })?; - - // Commit the transaction. - // NOTE: - // We do not append flow.versions with new version. - // We will do this in the end of the dependency job handler. - // Otherwise it might become a source of race-conditions. - flow_tx.commit().await?; - JobPayload::FlowDependencies { - path: s.importer_path.clone(), - dedicated_worker: None, - // Point Dep Job to the new version. - // We do this since we want to assume old ones are immutable. 
+            match sqlx::query_scalar!(
+                "SELECT id FROM flow_version WHERE path = $1 AND workspace_id = $2 ORDER BY created_at DESC LIMIT 1",
+                s.importer_path.clone(),
+                w_id
+            )
+            .fetch_optional(&mut *tx)
+            .await?
+            {
+                Some(version) => JobPayload::FlowDependencies {
+                    path: s.importer_path.clone(),
+                    version,
+                    dedicated_worker: None,
+                },
+                None => {
+                    ScopedDependencyMap::clear_map_for_item(
                         &s.importer_path,
                         w_id,
-                        cur_version
+                        "flow",
+                        tx,
+                        &None,
                     )
-                    .fetch_one(&mut *flow_tx)
                     .await
-                    .map_err(|e| {
-                        error::Error::internal_err(format!(
-                            "Error updating flow due to flow history insert: {e:#}"
-                        ))
-                    })?;
-
-                    // Commit the transaction.
-                    // NOTE:
-                    // We do not append flow.versions with new version.
-                    // We will do this in the end of the dependency job handler.
-                    // Otherwise it might become a source of race-conditions.
-                    flow_tx.commit().await?;
-                    JobPayload::FlowDependencies {
-                        path: s.importer_path.clone(),
-                        dedicated_worker: None,
-                        // Point Dep Job to the new version.
-                        // We do this since we want to assume old ones are immutable.
-                        version: new_version,
-                    }
-                }
-                Ok(None) => {
-                    if *WMDEBUG_NO_DMAP_DISSOLVE {
-                        tracing::warn!("WMDEBUG_NO_DMAP_DISSOLVE usually should not be used. Behavior might be unstable.");
-                    } else {
-                        // Remember the path we used to query the flow was fetched just now from dependency_map
-                        // if dependency_map advertise unexistent path, as part of self-healing it should be removed
-                        ScopedDependencyMap::clear_map_for_item(
-                            &s.importer_path,
-                            w_id,
-                            "flow",
-                            flow_tx,
-                            &None,
-                        )
-                        .await
-                        .commit()
-                        .await?;
-                    }
-                    continue;
-                }
-                Err(err) => {
-                    tracing::error!(
-                        "error getting latest deployed flow version for path {path}: {err}",
-                        path = s.importer_path,
-                    );
-                    // Do not commit the transaction. It will be dropped and rollbacked
+                    .commit()
+                    .await?;
                     continue;
                 }
             }
         } else if kind == "app" && !*WMDEBUG_NO_NEW_APP_VERSION_ON_DJ {
-            // Create transaction to make operation atomic.
-            let mut tx = db.begin().await?;
+            tracing::debug!("Handling app dependency update for: {}", s.importer_path);
 
             args.insert(
                 "components_to_relock".to_string(),
@@ -724,77 +612,28 @@ pub async fn trigger_dependents_to_recompute_dependencies(
                 to_raw_value(&s.importer_node_ids),
             );
 
-            let r = sqlx::query_scalar!(
-                "SELECT versions[array_upper(versions, 1)] FROM app WHERE path = $1 AND workspace_id = $2",
-                s.importer_path,
-                w_id,
-            ).fetch_optional(&mut *tx)
-            .await
-            .map_err(to_anyhow).map(Option::flatten);
-
-            match r {
-                // Get current version of current flow.
-                Ok(Some(cur_version)) => {
-                    // NOTE: Temporary solution. See the usage for more details.
-                    args.insert(
-                        "triggered_by_relative_import".to_string(),
-                        to_raw_value(&()),
-                    );
-
-                    let new_version = sqlx::query_scalar!(
-                        "INSERT INTO app_version
-                        (app_id, value, created_by, raw_app)
-                        SELECT app_id, value, created_by, raw_app
-                        FROM app_version WHERE id = $1
-                        RETURNING id",
-                        cur_version
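+            // Same self-healing pattern as the flow branch: point the job at the latest
+            // app_version id, or clear the stale dependency_map entry if the app is gone.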
+            match sqlx::query_scalar!(
+                "SELECT id FROM app_version WHERE app_id = (SELECT id FROM app WHERE path = $1 AND workspace_id = $2) ORDER BY created_at DESC LIMIT 1",
+                s.importer_path.clone(),
+                w_id
+            )
+            .fetch_optional(&mut *tx)
+            .await?
+            {
+                Some(version) => {
+                    JobPayload::AppDependencies { path: s.importer_path.clone(), version }
+                }
+                None => {
+                    ScopedDependencyMap::clear_map_for_item(
+                        &s.importer_path,
+                        w_id,
+                        "app",
+                        tx,
+                        &None,
                     )
-                    .fetch_one(&mut *tx)
                     .await
-                    .map_err(|e| {
-                        error::Error::internal_err(format!(
-                            "Error updating App due to App history insert: {e:#}"
-                        ))
-                    })?;
-
-                    // Commit the transaction.
-                    // NOTE:
-                    // We do not append app.versions with new version.
-                    // We will do this in the end of the dependency job handler.
-                    // Otherwise it might become a source of race-conditions.
-                    tx.commit().await?;
-                    JobPayload::AppDependencies {
-                        path: s.importer_path.clone(),
-                        // Point Dep Job to the new version.
-                        // We do this since we want to assume old ones are immutable.
-                        version: new_version,
-                    }
-                }
-                Ok(None) => {
-                    if *WMDEBUG_NO_DMAP_DISSOLVE {
-                        tracing::warn!("WMDEBUG_NO_DMAP_DISSOLVE usually should not be used. Behavior might be unstable.");
-                    } else {
-                        // Remember the path we used to query the flow was fetched just now from dependency_map
-                        // if dependency_map advertise unexistent path, as part of self-healing it should be removed
-                        ScopedDependencyMap::clear_map_for_item(
-                            &s.importer_path,
-                            w_id,
-                            "app",
-                            tx,
-                            &None,
-                        )
-                        .await
-                        .commit()
-                        .await?;
-                    }
-                    continue;
-                }
-                Err(err) => {
-                    tracing::error!(
-                        "error getting latest deployed app version for path {path}: {err}",
-                        path = s.importer_path,
-                    );
-                    // Do not commit the transaction. It will be dropped and rollbacked
+                    .commit()
+                    .await?;
                     continue;
                 }
             }
@@ -807,9 +646,10 @@ pub async fn trigger_dependents_to_recompute_dependencies(
             continue;
         };
 
+        tracing::debug!("Pushing dependency job for: {}", s.importer_path);
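+        // The push below is scheduled DEPENDENCY_JOB_DEBOUNCE_DELAY seconds into the
+        // future and tagged "dependency"; debounce_job_id_o, read under the debounce_key
+        // lock above, lets the queue side debounce this push against an already-pending
+        // dependency job instead of enqueuing a duplicate.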
         let (job_uuid, new_tx) = windmill_queue::push(
             db,
-            tx,
+            PushIsolationLevel::Transaction(tx),
             &w_id,
             job_payload,
             windmill_queue::PushArgs { args: &args, extra: None },
@@ -817,7 +657,8 @@ pub async fn trigger_dependents_to_recompute_dependencies(
             email,
             permissioned_as.to_string(),
             Some("trigger.dependents.to.recompute.dependencies"),
-            None,
+            // Schedule into the future for debouncing.
+            Some(Utc::now() + Duration::seconds(*DEPENDENCY_JOB_DEBOUNCE_DELAY as i64)),
             None,
             None,
             None,
@@ -827,15 +668,17 @@ pub async fn trigger_dependents_to_recompute_dependencies(
             false,
             None,
             true,
-            None,
+            Some("dependency".into()),
             None,
             None,
             None,
             None,
             false,
             None,
+            debounce_job_id_o,
         )
         .await?;
+
         tracing::info!(
             "pushed dependency job due to common python path: {job_uuid} for path {path}",
             path = s.importer_path,
@@ -846,7 +689,7 @@ pub async fn trigger_dependents_to_recompute_dependencies(
 }
 
 pub async fn handle_flow_dependency_job(
-    job: &MiniPulledJob,
+    job: MiniPulledJob,
     preview_data: Option<&RawData>,
     mem_peak: &mut i32,
     canceled_by: &mut Option<CanceledBy>,
@@ -858,6 +701,9 @@ pub async fn handle_flow_dependency_job(
     token: &str,
     occupancy_metrics: &mut OccupancyMetrics,
 ) -> error::Result<Box<RawValue>> {
+    tracing::debug!("Processing flow dependency job");
+    tracing::trace!("Job details: {:?}", &job);
+    tracing::trace!("Preview data: {:?}", &preview_data);
     let job_path = job.runnable_path.clone().ok_or_else(|| {
         error::Error::internal_err(
             "Cannot resolve flow dependencies for flow without path".to_string(),
@@ -875,6 +721,12 @@ pub async fn handle_flow_dependency_job(
         .flatten()
         .unwrap_or(false);
 
+    let triggered_by_relative_import = job
+        .args
+        .as_ref()
+        .map(|x| x.get("triggered_by_relative_import").is_some())
+        .unwrap_or_default();
+
     let version = if skip_flow_update {
         None
     } else {
@@ -890,6 +742,7 @@ pub async fn handle_flow_dependency_job(
         )
     };
 
+    tracing::trace!("Job details: {:?}", &job);
     let (deployment_message, parent_path) =
         get_deployment_msg_and_parent_path_from_args(job.args.clone());
 
@@ -903,6 +756,7 @@ pub async fn handle_flow_dependency_job(
         })
         .flatten();
 
+    tracing::debug!("Nodes to relock: {:?}", &nodes_to_relock);
     let raw_deps = job
         .args
         .as_ref()
@@ -913,12 +767,6 @@ pub async fn handle_flow_dependency_job(
         })
         .flatten();
 
-    let triggered_by_relative_import = job
-        .args
-        .as_ref()
-        .map(|x| x.get("triggered_by_relative_import").is_some())
-        .unwrap_or_default();
-
     // `JobKind::FlowDependencies` job store either:
     // - A saved flow version `id` in the `script_hash` column.
     // - Preview raw flow in the `queue` or `job` table.
@@ -959,7 +807,7 @@ pub async fn handle_flow_dependency_job(
         let errors;
         (flow.modules, tx, modified_ids, errors) = lock_modules(
             flow.modules,
-            job,
+            &job,
             mem_peak,
             canceled_by,
             job_dir,
@@ -1094,6 +942,8 @@ pub async fn handle_flow_dependency_job(
             &job_path,
             &job.workspace_id,
         ).execute(&mut *tx).await?;
+        tracing::debug!("Marked flow version as latest");
+        tracing::debug!("Flow version: {}", version);
     }
     tx.commit().await?;
@@ -2091,7 +1941,7 @@ async fn lock_modules_app(
 }
 
 pub async fn handle_app_dependency_job(
-    job: &MiniPulledJob,
+    job: MiniPulledJob,
     mem_peak: &mut i32,
     canceled_by: &mut Option<CanceledBy>,
     job_dir: &str,
@@ -2158,7 +2008,7 @@ pub async fn handle_app_dependency_job(
     if let Some((app_id, value)) = record {
         let value = lock_modules_app(
             value,
-            job,
+            &job,
             mem_peak,
             canceled_by,
             job_dir,