From 3681d0b674703151ea2ff1f1bb458ae11705f049 Mon Sep 17 00:00:00 2001
From: BoySanic
Date: Thu, 15 May 2025 16:51:38 -0700
Subject: [PATCH 01/29] initial commit for validation feature

Maybe fully implemented?
Implement a few suggestions
Implement fixes
Fix from rebasing, cargo fmt and clippy
Move sqlx dependent stuff into a feature
Refactoring, fixes
Add invalid state
Update names, fix logic in validate_fetch
Fix query refactor and fix
Renaming
More progress
Endpoints seem to work now
Fix to work with latest master
Add additional error state
Rename errors
Rename error
Implement new error handling
Cargo clippy/fmt
Partial refactor - just for dutchen to work on api stuff
Finish major refactor
Remove old code
Implement dutchen fixes where still applicable
Fix problems post-rebase
Major refactor - again
Comments and small formatting touch-ups
Rename enum states for clarity
Small adjustments
Refactor some more...
Re-add bounds check for additional_assignments_needed
Fix bounds check
Update gitignore, move validate_fetch_request to its own request
Remove unneeded error states
Refactor
Fix sqlx prepare after rebase
Fix formatting
---
 .gitignore | 1 +
 common/src/errors/mod.rs | 4 +
 common/src/errors/validate_fetch_error.rs | 8 +
 common/src/errors/validate_submit_err.rs | 16 ++
 common/src/records/assignment.rs | 2 +-
 common/src/records/task.rs | 4 +-
 common/src/requests/mod.rs | 4 +
 common/src/requests/validate_fetch_request.rs | 8 +
 .../src/requests/validate_submit_request.rs | 10 +
 common/src/types/assignment_state.rs | 1 +
 ...7b2e6b5a31199b622f28ca3692fe60eec53c9.json | 6 +-
 ...d430966e8ae83a168de0d3444bb8a4c7b1051.json | 10 +
 ...b16b87bf12743e25be5d1707c07edd8d94dfd.json | 10 +
 ...9628db2f65fd407f22b21595bfb854c6e62fd.json | 62 +++++
 ...7c47896571f1b0b9b844e84e298f87e3ed09b.json | 10 +
 ...d27ae319fe5ef20c765e854d9651d6f492d65.json | 15 ++
 ...c5602d215db4e5cf61484cf7b80c84b0cfe5c.json | 3 +-
 ...503ac7ff74c04bed3c64f73331477298b8a1c.json | 22 ++
 ...faf7ba6f659832bd727aa8faa69fe761254d1.json | 3 +-
 ...bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json | 64 +++++
 ...2c6ffe7554aef3f4f959cab1d6073882e22d4.json | 15 ++
 server/migrations/20250426220809_init.sql | 7 +-
 server/src/main.rs | 13 +-
 server/src/result/status.rs | 15 +-
 server/src/routes/mod.rs | 6 +-
 server/src/routes/validate_fetch.rs | 63 +++++
 server/src/routes/validate_submit.rs | 228 ++++++++++++++++++
 27 files changed, 592 insertions(+), 18 deletions(-)
 create mode 100644 common/src/errors/validate_fetch_error.rs
 create mode 100644 common/src/errors/validate_submit_err.rs
 create mode 100644 common/src/requests/validate_fetch_request.rs
 create mode 100644 common/src/requests/validate_submit_request.rs
 create mode 100644 server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json
 create mode 100644 server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json
 create mode 100644 server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json
 create mode 100644 server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json
 create mode 100644 server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json
 create mode 100644 server/src/routes/validate_fetch.rs
 create mode 100644 server/src/routes/validate_submit.rs

diff --git a/.gitignore b/.gitignore
index 860734b..62dee43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 /target
 /server/.env
+.env
\ No newline at end of file
diff --git
a/common/src/errors/mod.rs b/common/src/errors/mod.rs index 9650814..27fa704 100644 --- a/common/src/errors/mod.rs +++ b/common/src/errors/mod.rs @@ -3,9 +3,13 @@ pub mod infallible; pub mod not_found; pub mod register_error; pub mod submit_result_error; +pub mod validate_fetch_error; +pub mod validate_submit_err; pub use fetch_tasks_error::FetchTasksError; pub use infallible::Infallible; pub use not_found::NotFound; pub use register_error::RegisterError; pub use submit_result_error::SubmitResultError; +pub use validate_fetch_error::ValidateFetchError; +pub use validate_submit_err::ValidateSubmitError; diff --git a/common/src/errors/validate_fetch_error.rs b/common/src/errors/validate_fetch_error.rs new file mode 100644 index 0000000..9fdc122 --- /dev/null +++ b/common/src/errors/validate_fetch_error.rs @@ -0,0 +1,8 @@ +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Clone, Hash, Debug, Serialize, Deserialize, Error)] +pub enum ValidateFetchError { + #[error("invalid project")] + InvalidProject, +} diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs new file mode 100644 index 0000000..66e31a7 --- /dev/null +++ b/common/src/errors/validate_submit_err.rs @@ -0,0 +1,16 @@ +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Clone, Hash, Debug, Serialize, Deserialize, Error)] +pub enum ValidateSubmitError { + #[error("invalid assignment")] + InvalidAssignment, + #[error("invalid assignment state")] + InvalidAssignmentState, + #[error("provided assignments needed value out of bounds")] + AssignmentsNeededOutOfBounds, + #[error("result count is less than quorum")] + ResultCountLessThanQuorum, + #[error("state transition forbidden")] + StateTransitionForbidden, +} diff --git a/common/src/records/assignment.rs b/common/src/records/assignment.rs index b96d617..fab6ddd 100644 --- a/common/src/records/assignment.rs +++ b/common/src/records/assignment.rs @@ -5,7 +5,7 @@ use crate::types::{AssignmentState, Id}; use super::{Task, User}; -#[derive(Clone, Hash, Debug, Serialize, Deserialize)] +#[derive(Clone, Copy, Hash, Debug, Serialize, Deserialize)] pub struct Assignment { pub id: Id, pub created_at: DateTime, diff --git a/common/src/records/task.rs b/common/src/records/task.rs index 385f28e..18cc4f0 100644 --- a/common/src/records/task.rs +++ b/common/src/records/task.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::types::{Id, Interval}; -use super::{Project, User}; +use super::{Project, Result, User}; #[derive(Clone, Hash, Debug, Serialize, Deserialize)] pub struct Task { @@ -14,6 +14,8 @@ pub struct Task { pub stdin: String, pub assignments_needed: i32, pub assignment_user_ids: Vec>, + pub canonical_result_id: Option>, + pub quorum: i32, } #[non_exhaustive] diff --git a/common/src/requests/mod.rs b/common/src/requests/mod.rs index bb3d8e7..e9c0a80 100644 --- a/common/src/requests/mod.rs +++ b/common/src/requests/mod.rs @@ -1,7 +1,11 @@ pub mod fetch_tasks_request; pub mod register_request; pub mod submit_result_request; +pub mod validate_fetch_request; +pub mod validate_submit_request; pub use fetch_tasks_request::FetchTasksRequest; pub use register_request::RegisterRequest; pub use submit_result_request::SubmitResultRequest; +pub use validate_fetch_request::ValidateFetchRequest; +pub use validate_submit_request::ValidateSubmitRequest; diff --git a/common/src/requests/validate_fetch_request.rs b/common/src/requests/validate_fetch_request.rs new file mode 100644 index 0000000..2bba424 --- 
/dev/null +++ b/common/src/requests/validate_fetch_request.rs @@ -0,0 +1,8 @@ +use serde::{Deserialize, Serialize}; + +use crate::{records::Project, types::Id}; + +#[derive(Clone, Hash, Debug, Serialize, Deserialize)] +pub struct ValidateFetchRequest { + pub project_ids: Vec>, +} diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs new file mode 100644 index 0000000..13db817 --- /dev/null +++ b/common/src/requests/validate_submit_request.rs @@ -0,0 +1,10 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; + +use crate::{records::Assignment, types::Id}; + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ValidateSubmitRequest { + pub assignments: HashMap, Option>, +} diff --git a/common/src/types/assignment_state.rs b/common/src/types/assignment_state.rs index d5336bd..da3bbb5 100644 --- a/common/src/types/assignment_state.rs +++ b/common/src/types/assignment_state.rs @@ -14,4 +14,5 @@ pub enum AssignmentState { Valid, Invalid, Inconclusive, + Error, } diff --git a/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json b/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json index ced2773..9a6dc09 100644 --- a/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json +++ b/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json @@ -42,7 +42,8 @@ "submitted", "valid", "invalid", - "inconclusive" + "inconclusive", + "error" ] } } @@ -64,7 +65,8 @@ "submitted", "valid", "invalid", - "inconclusive" + "inconclusive", + "error" ] } } diff --git a/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json b/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json index 8117500..48ce028 100644 --- a/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json +++ b/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json @@ -37,6 +37,16 @@ "ordinal": 6, "name": "assignment_user_ids", "type_info": "Int8Array" + }, + { + "ordinal": 6, + "name": "canonical_result_id", + "type_info": "Int8" + }, + { + "ordinal": 7, + "name": "quorum", + "type_info": "Int4" } ], "parameters": { diff --git a/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json b/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json index b33cd31..267f09d 100644 --- a/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json +++ b/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json @@ -37,6 +37,16 @@ "ordinal": 6, "name": "assignment_user_ids", "type_info": "Int8Array" + }, + { + "ordinal": 6, + "name": "canonical_result_id", + "type_info": "Int8" + }, + { + "ordinal": 7, + "name": "quorum", + "type_info": "Int4" } ], "parameters": { diff --git a/server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json b/server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json new file mode 100644 index 0000000..fd28c2b --- /dev/null +++ b/server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json @@ -0,0 +1,62 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM\n assignments\n WHERE\n id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + 
"type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "task_id", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "user_id", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "state", + "type_info": { + "Custom": { + "name": "assignment_state", + "kind": { + "Enum": [ + "init", + "canceled", + "expired", + "submitted", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + false, + false + ] + }, + "hash": "5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd" +} diff --git a/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json b/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json index 73ddf24..2788e90 100644 --- a/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json +++ b/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json @@ -37,6 +37,16 @@ "ordinal": 6, "name": "assignment_user_ids", "type_info": "Int8Array" + }, + { + "ordinal": 6, + "name": "canonical_result_id", + "type_info": "Int8" + }, + { + "ordinal": 7, + "name": "quorum", + "type_info": "Int4" } ], "parameters": { diff --git a/server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json b/server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json new file mode 100644 index 0000000..5450a5d --- /dev/null +++ b/server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tasks\n SET assignments_needed = assignments_needed + $2\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65" +} diff --git a/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json b/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json index c49c6dc..7ba3d62 100644 --- a/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json +++ b/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json @@ -42,7 +42,8 @@ "submitted", "valid", "invalid", - "inconclusive" + "inconclusive", + "error" ] } } diff --git a/server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json b/server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json new file mode 100644 index 0000000..208aebe --- /dev/null +++ b/server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n count(*) as \"count!\"\n FROM\n projects\n WHERE\n id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c" +} diff --git a/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json b/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json index 67cb28d..3b03451 100644 --- 
a/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json +++ b/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json @@ -16,7 +16,8 @@ "submitted", "valid", "invalid", - "inconclusive" + "inconclusive", + "error" ] } } diff --git a/server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json b/server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json new file mode 100644 index 0000000..e6ed6c3 --- /dev/null +++ b/server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = ANY($1)\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "project_id", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "stdin", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "assignments_needed", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "assignment_user_ids", + "type_info": "Int8Array" + }, + { + "ordinal": 6, + "name": "canonical_result_id", + "type_info": "Int8" + }, + { + "ordinal": 7, + "name": "quorum", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + false + ] + }, + "hash": "e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6" +} diff --git a/server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json b/server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json new file mode 100644 index 0000000..6f23323 --- /dev/null +++ b/server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = \n (SELECT \n r.id \n FROM \n results r\n JOIN assignments a\n ON a.id = r.assignment_id\n WHERE a.id = ANY($2)\n ORDER BY \n r.created_at DESC \n LIMIT 1\n )\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4" +} diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index 7088a1a..768eab3 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -38,7 +38,9 @@ CREATE TABLE tasks ( project_id int8 NOT NULL REFERENCES projects(id) ON DELETE RESTRICT ON UPDATE RESTRICT, stdin text NOT NULL, assignments_needed int4 NOT NULL, - assignment_user_ids int8[] NOT NULL DEFAULT ARRAY[]::int8[] + assignment_user_ids int8[] NOT NULL DEFAULT ARRAY[]::int8[], + canonical_result_id int8, + quorum int4 NOT NULL ); CREATE TYPE assignment_state AS ENUM ( @@ -48,7 +50,8 @@ CREATE TYPE assignment_state AS ENUM ( 'submitted', 'valid', 'invalid', - 'inconclusive' + 'inconclusive', + 'error' ); CREATE TABLE assignments ( diff --git a/server/src/main.rs b/server/src/main.rs index 91368df..6cb4232 100644 --- a/server/src/main.rs +++ 
b/server/src/main.rs @@ -13,7 +13,9 @@ use axum::{ use clusterizer_common::records::{ Assignment, Platform, Project, ProjectVersion, Result, Task, User, }; -use routes::{get_all, get_one}; + +use routes::*; + use sqlx::PgPool; use state::AppState; use tokio::{net::TcpListener, time}; @@ -54,10 +56,11 @@ async fn serve_task(state: AppState, address: String) { .route("/assignments/{id}", get(get_one::)) .route("/results", get(get_all::)) .route("/results/{id}", get(get_one::)) - .route("/register", post(routes::register)) - .route("/fetch_tasks", post(routes::fetch_tasks)) - .route("/submit_result/{id}", post(routes::submit_result)) - .layer(TraceLayer::new_for_http()) + .route("/register", post(register::register)) + .route("/fetch_tasks", post(fetch_tasks::fetch_tasks)) + .route("/submit_result/{id}", post(submit_result::submit_result)) + .route("/validate_fetch/{id}", get(validate_fetch::validate_fetch)) + .route("/validate_submit", post(validate_submit::validate_submit)) .with_state(state); let listener = TcpListener::bind(address).await.unwrap(); diff --git a/server/src/result/status.rs b/server/src/result/status.rs index d41991c..15b5baa 100644 --- a/server/src/result/status.rs +++ b/server/src/result/status.rs @@ -1,6 +1,7 @@ use axum::http::StatusCode; use clusterizer_common::errors::{ - FetchTasksError, Infallible, NotFound, RegisterError, SubmitResultError, + FetchTasksError, Infallible, NotFound, RegisterError, SubmitResultError, ValidateFetchError, + ValidateSubmitError, }; pub trait Status { @@ -36,3 +37,15 @@ impl Status for SubmitResultError { StatusCode::BAD_REQUEST } } + +impl Status for ValidateSubmitError { + fn status(&self) -> StatusCode { + StatusCode::BAD_REQUEST + } +} + +impl Status for ValidateFetchError { + fn status(&self) -> StatusCode { + StatusCode::BAD_REQUEST + } +} diff --git a/server/src/routes/mod.rs b/server/src/routes/mod.rs index 511e0bc..a2e8a55 100644 --- a/server/src/routes/mod.rs +++ b/server/src/routes/mod.rs @@ -16,10 +16,8 @@ use crate::{ pub mod fetch_tasks; pub mod register; pub mod submit_result; - -pub use fetch_tasks::fetch_tasks; -pub use register::register; -pub use submit_result::submit_result; +pub mod validate_fetch; +pub mod validate_submit; pub async fn get_all( State(state): State, diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs new file mode 100644 index 0000000..388efc5 --- /dev/null +++ b/server/src/routes/validate_fetch.rs @@ -0,0 +1,63 @@ +use axum::{ + Json, + extract::{Path, State}, +}; +use clusterizer_common::{ + errors::ValidateFetchError, + records::{Project, Task}, + requests::ValidateFetchRequest, + types::Id, +}; + +use crate::{ + result::{AppResult, ResultExt}, + state::AppState, +}; + +pub async fn validate_fetch( + State(state): State, + Path(project_id): Path>, + Json(request): Json, +) -> AppResult>, ValidateFetchError> { + sqlx::query_scalar_unchecked!( + r#" + SELECT + count(*) as "count!" 
+ FROM + projects + WHERE + id = $1 + "#, + project_id + ) + .fetch_one(&state.pool) + .await + .map_not_found(ValidateFetchError::InvalidProject)?; + + let task = sqlx::query_as_unchecked!( + Task, + r#" + SELECT + t.* + FROM + tasks t + JOIN assignments a ON + a.task_id = t.id + WHERE + a.state not in ('canceled', 'expired') + GROUP BY + t.id + HAVING + t.project_id = ANY($1) + AND ( + count(a.id) >= t.assignments_needed + OR t.canonical_result_id IS NOT NULL + ) + "#, + request.project_ids, + ) + .fetch_all(&state.pool) + .await?; + + Ok(Json(task.into_iter().collect())) +} diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs new file mode 100644 index 0000000..c57e884 --- /dev/null +++ b/server/src/routes/validate_submit.rs @@ -0,0 +1,228 @@ +use axum::{Json, extract::State}; +use clusterizer_common::{ + errors::ValidateSubmitError, + records::{Assignment, Task}, + requests::ValidateSubmitRequest, + types::{AssignmentState, Id}, +}; + +use std::collections::HashMap; + +use crate::{ + result::{AppError, AppResult, ResultExt}, + state::AppState, + util::{Select, set_assignment_state}, +}; + +pub async fn validate_submit( + State(state): State, + Json(request): Json, +) -> AppResult<(), ValidateSubmitError> { + let assignment_ids: Vec> = request.assignments.keys().cloned().collect(); + + let assignments = sqlx::query_as_unchecked!( + Assignment, + r#" + SELECT + * + FROM + assignments + WHERE + id = ANY($1) + "#, + assignment_ids + ) + .fetch_all(&state.pool) + .await? as Vec; + + if assignment_ids.len() != assignments.len() { + Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))? + } + + let mut task_ids = Vec::from_iter(assignments.iter().map(|assignment| assignment.task_id)); + task_ids.sort(); + task_ids.dedup(); + + let task_assignment_map = assignments.iter().fold( + HashMap::new(), + |mut map: HashMap, Vec>>, assignment| { + map.entry(assignment.task_id) + .or_default() + .push(assignment.id); + map + }, + ); + + for task_id in task_assignment_map.keys() { + let task = Select::select_one(*task_id).fetch_one(&state.pool).await?; + + let task_db_assignments = task_assignment_map[task_id].clone(); + + let mut group_map: HashMap = HashMap::new(); + let mut errored_assignments: Vec> = Vec::new(); + for assignment in task_db_assignments.iter() { + let group_num = request.assignments[assignment]; + if group_num.is_some() { + *group_map.entry(group_num.unwrap()).or_insert(0) += 1; + } else { + // Task errored, so add one to assignments needed + errored_assignments.push(*assignment); + } + } + let valid_groups: Vec = group_map + .iter() + .filter(|kvp| *kvp.1 >= task.quorum) + .map(|kvp| *kvp.0) + .collect(); + + let invalid_groups: Vec = group_map + .iter() + .filter(|kvp| *kvp.1 < task.quorum) + .map(|kvp| *kvp.0) + .collect(); + + if valid_groups.len() > 1 { + // Cannot have more than one valid group - this is inconsistent + } + if valid_groups.len() == 1 || task.canonical_result_id.is_some() { + // Valid + + let valid_assignments: Vec> = task_db_assignments + .iter() + .filter(|assignment| { + if let Some(group_num) = request.assignments[assignment] { + group_num == valid_groups[0] + } else { + false + } + }) + .cloned() + .collect(); + + let invalid_assignments: Vec> = task_db_assignments + .iter() + .filter(|assignment| { + invalid_groups + .iter() + .any(|group_num| *group_num == request.assignments[assignment].unwrap()) + }) + .copied() + .collect(); + + // If any assignments in db have states besides Submitted or Inconclusive and they 
have been submitted as valid, disallow it. + if valid_assignments.iter().any(|assignment| { + assignments.iter().any(|ass| { + ass.id == *assignment + && ass.state != AssignmentState::Submitted + && ass.state != AssignmentState::Inconclusive + }) || invalid_assignments.iter().any(|assignment| { + assignments.iter().any(|ass| { + ass.id == *assignment + && ass.state != AssignmentState::Submitted + && ass.state != AssignmentState::Inconclusive + }) + }) + }) { + Err(AppError::Specific( + ValidateSubmitError::StateTransitionForbidden, + ))? + } + + if task.canonical_result_id.is_none() { + // If we don't have a valid result yet + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + canonical_result_id = + (SELECT + r.id + FROM + results r + JOIN assignments a + ON a.id = r.assignment_id + WHERE a.id = ANY($2) + ORDER BY + r.created_at DESC + LIMIT 1 + ) + WHERE + id = $1 + "#, + task.id, + valid_assignments + ) + .execute(&state.pool) + .await?; + } + set_assignment_state::set_assignment_state( + valid_assignments.as_slice(), + AssignmentState::Valid, + ) + .execute(&state.pool) + .await + .map_not_found(ValidateSubmitError::InvalidAssignment)?; + + // Mark invalid + set_assignment_state::set_assignment_state( + &invalid_assignments, + AssignmentState::Invalid, + ) + .execute(&state.pool) + .await?; + } + if valid_groups.is_empty() && !invalid_groups.is_empty() { + // No groups had a count at least equal to quorum + // Inconclusive + + // Cannot be inconclusive if a result is canonical already. Either it's valid or it's not. + if task.canonical_result_id.is_some() { + Err(AppError::Specific( + ValidateSubmitError::InvalidAssignmentState, + ))? + } + + let inconclusive_assignments: Vec> = task_db_assignments + .iter() + .filter(|assignment| { + invalid_groups + .iter() + .any(|group_num| *group_num == request.assignments[assignment].unwrap()) + }) + .copied() + .collect(); + + let mut max_count = 0; + for (_, count) in group_map { + if count > max_count { + max_count = count; + } + } + sqlx::query_unchecked!( + r#" + UPDATE tasks + SET assignments_needed = assignments_needed + $2 + WHERE + id = $1 + "#, + task.id, + task.quorum - (errored_assignments.len() as i32 + max_count) + ) + .execute(&state.pool) + .await?; + + // Mark assignments as inconclusive + set_assignment_state(&inconclusive_assignments, AssignmentState::Inconclusive) + .execute(&state.pool) + .await?; + + //Mark assignments as errored + set_assignment_state(&errored_assignments, AssignmentState::Error) + .execute(&state.pool) + .await?; + } + } + + Ok(()) +} From 8f4741150b49e712f4df870e0cd232b4ef209dc3 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Fri, 23 May 2025 10:28:56 -0700 Subject: [PATCH 02/29] Mild refactor of validate_submit --- common/src/errors/validate_submit_err.rs | 10 +-- server/src/routes/validate_submit.rs | 93 +++++++++++++----------- 2 files changed, 54 insertions(+), 49 deletions(-) diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 66e31a7..943761b 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -5,12 +5,10 @@ use thiserror::Error; pub enum ValidateSubmitError { #[error("invalid assignment")] InvalidAssignment, - #[error("invalid assignment state")] - InvalidAssignmentState, - #[error("provided assignments needed value out of bounds")] - AssignmentsNeededOutOfBounds, - #[error("result count is less than quorum")] - ResultCountLessThanQuorum, + #[error("task already validated and this result is not 
valid")] + InconsistentValidationState, + #[error("too many groups meeting quorum were provided")] + ValidityAmbiguous, #[error("state transition forbidden")] StateTransitionForbidden, } diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index c57e884..a3d3c7e 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -23,13 +23,13 @@ pub async fn validate_submit( let assignments = sqlx::query_as_unchecked!( Assignment, r#" - SELECT - * - FROM - assignments - WHERE - id = ANY($1) - "#, + SELECT + * + FROM + assignments + WHERE + id = ANY($1) + "#, assignment_ids ) .fetch_all(&state.pool) @@ -43,6 +43,7 @@ pub async fn validate_submit( task_ids.sort(); task_ids.dedup(); + // Generate a map of tasks to assignments with that task_id for use in the next loop let task_assignment_map = assignments.iter().fold( HashMap::new(), |mut map: HashMap, Vec>>, assignment| { @@ -53,22 +54,26 @@ pub async fn validate_submit( }, ); - for task_id in task_assignment_map.keys() { + // Loop through all tasks that have been submitted for validation via their assignments + // This avoids a MixedTasks error as we simply error with "too few results" instead for that particular task + for (task_id, task_assignments) in task_assignment_map.iter() { let task = Select::select_one(*task_id).fetch_one(&state.pool).await?; - let task_db_assignments = task_assignment_map[task_id].clone(); - let mut group_map: HashMap = HashMap::new(); let mut errored_assignments: Vec> = Vec::new(); - for assignment in task_db_assignments.iter() { - let group_num = request.assignments[assignment]; - if group_num.is_some() { - *group_map.entry(group_num.unwrap()).or_insert(0) += 1; - } else { + + // Get group number and count of assignments in each group + for assignment in task_assignments.iter() { + let group_num_option = request.assignments[assignment]; + + match group_num_option { + // Task has a group number, so use it. + Some(group_num) => *group_map.entry(group_num).or_insert(0) += 1, // Task errored, so add one to assignments needed - errored_assignments.push(*assignment); + None => errored_assignments.push(*assignment), } } + let valid_groups: Vec = group_map .iter() .filter(|kvp| *kvp.1 >= task.quorum) @@ -83,11 +88,12 @@ pub async fn validate_submit( if valid_groups.len() > 1 { // Cannot have more than one valid group - this is inconsistent + Err(AppError::Specific(ValidateSubmitError::ValidityAmbiguous))? 
} if valid_groups.len() == 1 || task.canonical_result_id.is_some() { // Valid - let valid_assignments: Vec> = task_db_assignments + let valid_assignments: Vec> = task_assignments .iter() .filter(|assignment| { if let Some(group_num) = request.assignments[assignment] { @@ -99,7 +105,7 @@ pub async fn validate_submit( .cloned() .collect(); - let invalid_assignments: Vec> = task_db_assignments + let invalid_assignments: Vec> = task_assignments .iter() .filter(|assignment| { invalid_groups @@ -132,24 +138,25 @@ pub async fn validate_submit( // If we don't have a valid result yet sqlx::query_unchecked!( r#" - UPDATE - tasks - SET - canonical_result_id = - (SELECT - r.id - FROM - results r - JOIN assignments a - ON a.id = r.assignment_id - WHERE a.id = ANY($2) - ORDER BY - r.created_at DESC - LIMIT 1 - ) - WHERE - id = $1 - "#, + UPDATE + tasks + SET + canonical_result_id = + ( + SELECT + r.id + FROM + results r + JOIN assignments a + ON a.id = r.assignment_id + WHERE a.id = ANY($2) + ORDER BY + r.created_at DESC + LIMIT 1 + ) + WHERE + id = $1 + "#, task.id, valid_assignments ) @@ -179,11 +186,11 @@ pub async fn validate_submit( // Cannot be inconclusive if a result is canonical already. Either it's valid or it's not. if task.canonical_result_id.is_some() { Err(AppError::Specific( - ValidateSubmitError::InvalidAssignmentState, + ValidateSubmitError::InconsistentValidationState, ))? } - let inconclusive_assignments: Vec> = task_db_assignments + let inconclusive_assignments: Vec> = task_assignments .iter() .filter(|assignment| { invalid_groups @@ -201,11 +208,11 @@ pub async fn validate_submit( } sqlx::query_unchecked!( r#" - UPDATE tasks - SET assignments_needed = assignments_needed + $2 - WHERE - id = $1 - "#, + UPDATE tasks + SET assignments_needed = assignments_needed + $2 + WHERE + id = $1 + "#, task.id, task.quorum - (errored_assignments.len() as i32 + max_count) ) From 1c5f22e3d637e641e2bfa4f29fe65125316e0674 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Fri, 23 May 2025 10:41:22 -0700 Subject: [PATCH 03/29] Fix bug in validate_fetch since we're not using a vec of project_ids anymore --- common/src/requests/mod.rs | 2 - common/src/requests/validate_fetch_request.rs | 8 --- server/src/routes/validate_fetch.rs | 70 ++++++++++--------- 3 files changed, 38 insertions(+), 42 deletions(-) delete mode 100644 common/src/requests/validate_fetch_request.rs diff --git a/common/src/requests/mod.rs b/common/src/requests/mod.rs index e9c0a80..6bbfe7e 100644 --- a/common/src/requests/mod.rs +++ b/common/src/requests/mod.rs @@ -1,11 +1,9 @@ pub mod fetch_tasks_request; pub mod register_request; pub mod submit_result_request; -pub mod validate_fetch_request; pub mod validate_submit_request; pub use fetch_tasks_request::FetchTasksRequest; pub use register_request::RegisterRequest; pub use submit_result_request::SubmitResultRequest; -pub use validate_fetch_request::ValidateFetchRequest; pub use validate_submit_request::ValidateSubmitRequest; diff --git a/common/src/requests/validate_fetch_request.rs b/common/src/requests/validate_fetch_request.rs deleted file mode 100644 index 2bba424..0000000 --- a/common/src/requests/validate_fetch_request.rs +++ /dev/null @@ -1,8 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use crate::{records::Project, types::Id}; - -#[derive(Clone, Hash, Debug, Serialize, Deserialize)] -pub struct ValidateFetchRequest { - pub project_ids: Vec>, -} diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index 388efc5..6245798 100644 --- 
a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -5,24 +5,24 @@ use axum::{ use clusterizer_common::{ errors::ValidateFetchError, records::{Project, Task}, - requests::ValidateFetchRequest, types::Id, }; use crate::{ - result::{AppResult, ResultExt}, + result::{AppError, AppResult, ResultExt}, state::AppState, }; pub async fn validate_fetch( State(state): State, Path(project_id): Path>, - Json(request): Json, ) -> AppResult>, ValidateFetchError> { - sqlx::query_scalar_unchecked!( + let mut tx = state.pool.begin().await?; + let project_result = sqlx::query_as_unchecked!( + Project, r#" SELECT - count(*) as "count!" + * FROM projects WHERE @@ -30,34 +30,40 @@ pub async fn validate_fetch( "#, project_id ) - .fetch_one(&state.pool) - .await - .map_not_found(ValidateFetchError::InvalidProject)?; + .fetch_optional(&mut *tx) + .await?; - let task = sqlx::query_as_unchecked!( - Task, - r#" - SELECT - t.* - FROM - tasks t - JOIN assignments a ON - a.task_id = t.id - WHERE - a.state not in ('canceled', 'expired') - GROUP BY - t.id - HAVING - t.project_id = ANY($1) - AND ( - count(a.id) >= t.assignments_needed - OR t.canonical_result_id IS NOT NULL + match project_result{ + Some(project) => { + let task = sqlx::query_as_unchecked!( + Task, + r#" + SELECT + t.* + FROM + tasks t + JOIN assignments a ON + a.task_id = t.id + WHERE + a.state not in ('canceled', 'expired') + GROUP BY + t.id + HAVING + t.project_id = $1 + AND ( + count(a.id) >= t.assignments_needed + OR t.canonical_result_id IS NOT NULL + ) + "#, + project.id ) - "#, - request.project_ids, - ) - .fetch_all(&state.pool) - .await?; + .fetch_all(&state.pool) + .await?; + tx.commit().await?; + Ok(Json(task.into_iter().collect())) + }, + None => Err(AppError::Specific(ValidateFetchError::InvalidProject)) + } - Ok(Json(task.into_iter().collect())) + } From 8944ccf3e90fa0df032a85daf08cf9412a9ca284 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Fri, 23 May 2025 10:42:23 -0700 Subject: [PATCH 04/29] fixup! 
Fix bug in validate_fetch since we're not using a vec of project_ids anymore --- ...f0e211625d90dad1d3d07dd17fcf1e817b9f.json} | 4 +- ...6f417b321a029483787bc505297e0de325641.json | 15 +++++++ ...d27ae319fe5ef20c765e854d9651d6f492d65.json | 15 ------- ...5e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json} | 6 +-- ...503ac7ff74c04bed3c64f73331477298b8a1c.json | 22 ---------- ...befe495c216b5f5aee18a5812b41c57f0eb4f.json | 15 +++++++ ...b829c0a9d46d6dc4aa18e76f9fd309e207d54.json | 40 +++++++++++++++++++ ...2c6ffe7554aef3f4f959cab1d6073882e22d4.json | 15 ------- server/src/routes/validate_fetch.rs | 28 ++++++------- 9 files changed, 88 insertions(+), 72 deletions(-) rename server/.sqlx/{query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json => query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json} (78%) create mode 100644 server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json delete mode 100644 server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json rename server/.sqlx/{query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json => query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json} (58%) delete mode 100644 server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json create mode 100644 server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json create mode 100644 server/.sqlx/query-f7b43273fca1553daea58cb93f9b829c0a9d46d6dc4aa18e76f9fd309e207d54.json delete mode 100644 server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json diff --git a/server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json b/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json similarity index 78% rename from server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json rename to server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json index fd28c2b..9fa1e26 100644 --- a/server/.sqlx/query-5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd.json +++ b/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n assignments\n WHERE\n id = ANY($1)\n ", + "query": "\n SELECT\n *\n FROM\n assignments\n WHERE\n id = ANY($1)\n ", "describe": { "columns": [ { @@ -58,5 +58,5 @@ false ] }, - "hash": "5a7e0d775074335094b4e5e13fe9628db2f65fd407f22b21595bfb854c6e62fd" + "hash": "54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f" } diff --git a/server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json b/server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json new file mode 100644 index 0000000..95d4ee1 --- /dev/null +++ b/server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tasks\n SET assignments_needed = assignments_needed + $2\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641" +} diff --git a/server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json 
b/server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json deleted file mode 100644 index 5450a5d..0000000 --- a/server/.sqlx/query-a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tasks\n SET assignments_needed = assignments_needed + $2\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "a22d963ca9e84a0a720221c2d58d27ae319fe5ef20c765e854d9651d6f492d65" -} diff --git a/server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json b/server/.sqlx/query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json similarity index 58% rename from server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json rename to server/.sqlx/query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json index e6ed6c3..327038f 100644 --- a/server/.sqlx/query-e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6.json +++ b/server/.sqlx/query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = ANY($1)\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", "describe": { "columns": [ { @@ -46,7 +46,7 @@ ], "parameters": { "Left": [ - "Int8Array" + "Int8" ] }, "nullable": [ @@ -60,5 +60,5 @@ false ] }, - "hash": "e2c618e5823ea3f5483206317d7bdd3304bf58ea3b94ddf5e8590663c3d51ce6" + "hash": "cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c" } diff --git a/server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json b/server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json deleted file mode 100644 index 208aebe..0000000 --- a/server/.sqlx/query-cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n count(*) as \"count!\"\n FROM\n projects\n WHERE\n id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "cf1452bafd1a04e7d3786d423fe503ac7ff74c04bed3c64f73331477298b8a1c" -} diff --git a/server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json b/server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json new file mode 100644 index 0000000..60b31f6 --- /dev/null +++ b/server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = \n (\n SELECT \n r.id \n FROM \n results r\n JOIN assignments a\n ON a.id = r.assignment_id\n WHERE a.id = ANY($2)\n ORDER BY \n r.created_at DESC \n LIMIT 1\n )\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + 
"parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f" +} diff --git a/server/.sqlx/query-f7b43273fca1553daea58cb93f9b829c0a9d46d6dc4aa18e76f9fd309e207d54.json b/server/.sqlx/query-f7b43273fca1553daea58cb93f9b829c0a9d46d6dc4aa18e76f9fd309e207d54.json new file mode 100644 index 0000000..6cdadc0 --- /dev/null +++ b/server/.sqlx/query-f7b43273fca1553daea58cb93f9b829c0a9d46d6dc4aa18e76f9fd309e207d54.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM\n projects\n WHERE\n id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "disabled_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "name", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + true, + false + ] + }, + "hash": "f7b43273fca1553daea58cb93f9b829c0a9d46d6dc4aa18e76f9fd309e207d54" +} diff --git a/server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json b/server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json deleted file mode 100644 index 6f23323..0000000 --- a/server/.sqlx/query-fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = \n (SELECT \n r.id \n FROM \n results r\n JOIN assignments a\n ON a.id = r.assignment_id\n WHERE a.id = ANY($2)\n ORDER BY \n r.created_at DESC \n LIMIT 1\n )\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "fccf0d20f4d2c595f43e62104422c6ffe7554aef3f4f959cab1d6073882e22d4" -} diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index 6245798..201d7f8 100644 --- a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -9,7 +9,7 @@ use clusterizer_common::{ }; use crate::{ - result::{AppError, AppResult, ResultExt}, + result::{AppError, AppResult}, state::AppState, }; @@ -33,11 +33,11 @@ pub async fn validate_fetch( .fetch_optional(&mut *tx) .await?; - match project_result{ + match project_result { Some(project) => { - let task = sqlx::query_as_unchecked!( - Task, - r#" + let task = sqlx::query_as_unchecked!( + Task, + r#" SELECT t.* FROM @@ -55,15 +55,13 @@ pub async fn validate_fetch( OR t.canonical_result_id IS NOT NULL ) "#, - project.id - ) - .fetch_all(&state.pool) - .await?; - tx.commit().await?; - Ok(Json(task.into_iter().collect())) - }, - None => Err(AppError::Specific(ValidateFetchError::InvalidProject)) + project.id + ) + .fetch_all(&state.pool) + .await?; + tx.commit().await?; + Ok(Json(task.into_iter().collect())) + } + None => Err(AppError::Specific(ValidateFetchError::InvalidProject)), } - - } From f900b30254799892d770a2134f0f391612b6173d Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sat, 24 May 2025 00:13:49 -0700 Subject: [PATCH 05/29] Implement dutchen's refactor suggestions --- .gitignore | 1 - common/src/errors/validate_submit_err.rs | 2 + common/src/records/assignment.rs | 2 +- ...7b9f00627ee7fdbc36d923db1cdf7f06aedf.json} | 4 +- ...f94f224b083c68ff691b6bbeffdb171a83a47.json | 15 ++ ...6f417b321a029483787bc505297e0de325641.json | 15 -- 
...f388ef07ce490f99c4a9127e30153eb840824.json | 15 ++ ...1a5f19cd315d0a1881afb695861f4221cf4ca.json | 14 + ...41deed46496723b2ee94260ec1721cf8e8187.json | 52 ++++ ...befe495c216b5f5aee18a5812b41c57f0eb4f.json | 15 -- server/src/main.rs | 11 +- server/src/routes/mod.rs | 6 + server/src/routes/validate_fetch.rs | 66 +++-- server/src/routes/validate_submit.rs | 249 +++++++++--------- 14 files changed, 264 insertions(+), 203 deletions(-) rename server/.sqlx/{query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json => query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json} (59%) create mode 100644 server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json delete mode 100644 server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json create mode 100644 server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json create mode 100644 server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json create mode 100644 server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json delete mode 100644 server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json diff --git a/.gitignore b/.gitignore index 62dee43..860734b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,2 @@ /target /server/.env -.env \ No newline at end of file diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 943761b..6f18ec4 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -7,6 +7,8 @@ pub enum ValidateSubmitError { InvalidAssignment, #[error("task already validated and this result is not valid")] InconsistentValidationState, + #[error("multi-task validation in a single request is currently not implemented")] + MultipleTasksDisallowed, #[error("too many groups meeting quorum were provided")] ValidityAmbiguous, #[error("state transition forbidden")] diff --git a/common/src/records/assignment.rs b/common/src/records/assignment.rs index fab6ddd..b96d617 100644 --- a/common/src/records/assignment.rs +++ b/common/src/records/assignment.rs @@ -5,7 +5,7 @@ use crate::types::{AssignmentState, Id}; use super::{Task, User}; -#[derive(Clone, Copy, Hash, Debug, Serialize, Deserialize)] +#[derive(Clone, Hash, Debug, Serialize, Deserialize)] pub struct Assignment { pub id: Id, pub created_at: DateTime, diff --git a/server/.sqlx/query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json b/server/.sqlx/query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json similarity index 59% rename from server/.sqlx/query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json rename to server/.sqlx/query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json index 327038f..7362358 100644 --- a/server/.sqlx/query-cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c.json +++ b/server/.sqlx/query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n 
a.state not in ('canceled', 'init', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", "describe": { "columns": [ { @@ -60,5 +60,5 @@ false ] }, - "hash": "cb502560268c20c4f10f9791bd265e0f013f4440bd4bb4ca1c8f57f1c78ea21c" + "hash": "10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf" } diff --git a/server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json b/server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json new file mode 100644 index 0000000..01acf58 --- /dev/null +++ b/server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n tasks\n SET \n assignments_needed = assignments_needed + $2\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47" +} diff --git a/server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json b/server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json deleted file mode 100644 index 95d4ee1..0000000 --- a/server/.sqlx/query-59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tasks\n SET assignments_needed = assignments_needed + $2\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "59a324ac031640e3b5f37b9be0e6f417b321a029483787bc505297e0de325641" -} diff --git a/server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json b/server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json new file mode 100644 index 0000000..3fea3bc --- /dev/null +++ b/server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = $2\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824" +} diff --git a/server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json b/server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json new file mode 100644 index 0000000..a91e0e5 --- /dev/null +++ b/server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n tasks\n SET \n assignments_needed = assignments_needed + 1\n WHERE\n id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca" +} diff --git a/server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json b/server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json new file mode 100644 index 0000000..9d2afd0 --- /dev/null +++ b/server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "\n 
SELECT \n *\n FROM \n results r\n WHERE \n r.assignment_id = ANY($1)\n ORDER BY \n r.created_at ASC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "assignment_id", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "stdout", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "stderr", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "exit_code", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + true + ] + }, + "hash": "a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187" +} diff --git a/server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json b/server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json deleted file mode 100644 index 60b31f6..0000000 --- a/server/.sqlx/query-e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = \n (\n SELECT \n r.id \n FROM \n results r\n JOIN assignments a\n ON a.id = r.assignment_id\n WHERE a.id = ANY($2)\n ORDER BY \n r.created_at DESC \n LIMIT 1\n )\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "e50af1fefea907cbd07f1e038aebefe495c216b5f5aee18a5812b41c57f0eb4f" -} diff --git a/server/src/main.rs b/server/src/main.rs index 6cb4232..226cc75 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -15,7 +15,6 @@ use clusterizer_common::records::{ }; use routes::*; - use sqlx::PgPool; use state::AppState; use tokio::{net::TcpListener, time}; @@ -56,11 +55,11 @@ async fn serve_task(state: AppState, address: String) { .route("/assignments/{id}", get(get_one::)) .route("/results", get(get_all::)) .route("/results/{id}", get(get_one::)) - .route("/register", post(register::register)) - .route("/fetch_tasks", post(fetch_tasks::fetch_tasks)) - .route("/submit_result/{id}", post(submit_result::submit_result)) - .route("/validate_fetch/{id}", get(validate_fetch::validate_fetch)) - .route("/validate_submit", post(validate_submit::validate_submit)) + .route("/register", post(routes::register)) + .route("/fetch_tasks", post(routes::fetch_tasks)) + .route("/submit_result/{id}", post(routes::submit_result)) + .route("/validate_fetch/{id}", get(routes::validate_fetch)) + .route("/validate_submit", post(routes::validate_submit)) .with_state(state); let listener = TcpListener::bind(address).await.unwrap(); diff --git a/server/src/routes/mod.rs b/server/src/routes/mod.rs index a2e8a55..2bdf9e2 100644 --- a/server/src/routes/mod.rs +++ b/server/src/routes/mod.rs @@ -19,6 +19,12 @@ pub mod submit_result; pub mod validate_fetch; pub mod validate_submit; +pub use fetch_tasks::fetch_tasks; +pub use register::register; +pub use submit_result::submit_result; +pub use validate_fetch::validate_fetch; +pub use validate_submit::validate_submit; + pub async fn get_all( State(state): State, Query(filter): Query, diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index 201d7f8..9817b3e 100644 --- a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -9,7 +9,7 @@ use clusterizer_common::{ }; use crate::{ - result::{AppError, 
AppResult}, + result::{AppResult, ResultExt}, state::AppState, }; @@ -17,8 +17,7 @@ pub async fn validate_fetch( State(state): State, Path(project_id): Path>, ) -> AppResult>, ValidateFetchError> { - let mut tx = state.pool.begin().await?; - let project_result = sqlx::query_as_unchecked!( + let project = sqlx::query_as_unchecked!( Project, r#" SELECT @@ -30,38 +29,33 @@ pub async fn validate_fetch( "#, project_id ) - .fetch_optional(&mut *tx) - .await?; + .fetch_one(&state.pool) + .await + .map_not_found(ValidateFetchError::InvalidProject)?; - match project_result { - Some(project) => { - let task = sqlx::query_as_unchecked!( - Task, - r#" - SELECT - t.* - FROM - tasks t - JOIN assignments a ON - a.task_id = t.id - WHERE - a.state not in ('canceled', 'expired') - GROUP BY - t.id - HAVING - t.project_id = $1 - AND ( - count(a.id) >= t.assignments_needed - OR t.canonical_result_id IS NOT NULL - ) - "#, - project.id - ) - .fetch_all(&state.pool) - .await?; - tx.commit().await?; - Ok(Json(task.into_iter().collect())) - } - None => Err(AppError::Specific(ValidateFetchError::InvalidProject)), - } + let tasks = sqlx::query_as_unchecked!( + Task, + r#" + SELECT + t.* + FROM + tasks t + JOIN assignments a ON + a.task_id = t.id + WHERE + a.state not in ('canceled', 'init', 'expired') + GROUP BY + t.id + HAVING + t.project_id = $1 + AND ( + count(a.id) >= t.assignments_needed + OR t.canonical_result_id IS NOT NULL + ) + "#, + project.id + ) + .fetch_all(&state.pool) + .await?; + Ok(Json(tasks)) } diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index a3d3c7e..b0654ba 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -9,7 +9,7 @@ use clusterizer_common::{ use std::collections::HashMap; use crate::{ - result::{AppError, AppResult, ResultExt}, + result::{AppError, AppResult}, state::AppState, util::{Select, set_assignment_state}, }; @@ -18,7 +18,7 @@ pub async fn validate_submit( State(state): State, Json(request): Json, ) -> AppResult<(), ValidateSubmitError> { - let assignment_ids: Vec> = request.assignments.keys().cloned().collect(); + let assignment_ids: Vec<_> = request.assignments.keys().copied().collect(); let assignments = sqlx::query_as_unchecked!( Assignment, @@ -33,143 +33,117 @@ pub async fn validate_submit( assignment_ids ) .fetch_all(&state.pool) - .await? as Vec; + .await?; if assignment_ids.len() != assignments.len() { Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))? } - let mut task_ids = Vec::from_iter(assignments.iter().map(|assignment| assignment.task_id)); - task_ids.sort(); - task_ids.dedup(); + // Disallow state transitions via validation unless the assignment is one of these states + if assignments.iter().any(|assignment| { + assignment.state != AssignmentState::Submitted + && assignment.state != AssignmentState::Inconclusive + }) { + Err(AppError::Specific( + ValidateSubmitError::StateTransitionForbidden, + ))? 
+ } // Generate a map of tasks to assignments with that task_id for use in the next loop - let task_assignment_map = assignments.iter().fold( - HashMap::new(), - |mut map: HashMap, Vec>>, assignment| { - map.entry(assignment.task_id) - .or_default() - .push(assignment.id); - map - }, - ); + let mut task_assignment_map: HashMap, Vec>> = HashMap::new(); + for assignment in &assignments { + task_assignment_map + .entry(assignment.task_id) + .or_default() + .push(assignment.id); + } + if task_assignment_map.keys().len() > 1 { + Err(AppError::Specific( + ValidateSubmitError::MultipleTasksDisallowed, + ))? + } // Loop through all tasks that have been submitted for validation via their assignments // This avoids a MixedTasks error as we simply error with "too few results" instead for that particular task - for (task_id, task_assignments) in task_assignment_map.iter() { - let task = Select::select_one(*task_id).fetch_one(&state.pool).await?; + for (task_id, task_assignments) in &task_assignment_map { + let task = Task::select_one(*task_id).fetch_one(&state.pool).await?; - let mut group_map: HashMap = HashMap::new(); + let mut group_map: HashMap>> = HashMap::new(); let mut errored_assignments: Vec> = Vec::new(); // Get group number and count of assignments in each group - for assignment in task_assignments.iter() { + for assignment in task_assignments { let group_num_option = request.assignments[assignment]; match group_num_option { // Task has a group number, so use it. - Some(group_num) => *group_map.entry(group_num).or_insert(0) += 1, + Some(group_num) => group_map.entry(group_num).or_default().push(*assignment), // Task errored, so add one to assignments needed None => errored_assignments.push(*assignment), } } - let valid_groups: Vec = group_map + let valid_groups: HashMap>> = group_map .iter() - .filter(|kvp| *kvp.1 >= task.quorum) - .map(|kvp| *kvp.0) + .filter(|(_, assignments)| assignments.len() as i32 >= task.quorum) + .map(|(group_num, assignments)| (*group_num, assignments.clone())) .collect(); - let invalid_groups: Vec = group_map + let invalid_groups: HashMap>> = group_map .iter() - .filter(|kvp| *kvp.1 < task.quorum) - .map(|kvp| *kvp.0) + .filter(|(_, assignments)| (assignments.len() as i32) < task.quorum) + .map(|(group_num, assignments)| (*group_num, assignments.clone())) .collect(); - if valid_groups.len() > 1 { - // Cannot have more than one valid group - this is inconsistent - Err(AppError::Specific(ValidateSubmitError::ValidityAmbiguous))? - } if valid_groups.len() == 1 || task.canonical_result_id.is_some() { // Valid - - let valid_assignments: Vec> = task_assignments - .iter() - .filter(|assignment| { - if let Some(group_num) = request.assignments[assignment] { - group_num == valid_groups[0] - } else { - false - } - }) - .cloned() - .collect(); - - let invalid_assignments: Vec> = task_assignments - .iter() - .filter(|assignment| { - invalid_groups - .iter() - .any(|group_num| *group_num == request.assignments[assignment].unwrap()) - }) - .copied() - .collect(); - - // If any assignments in db have states besides Submitted or Inconclusive and they have been submitted as valid, disallow it. 
- if valid_assignments.iter().any(|assignment| { - assignments.iter().any(|ass| { - ass.id == *assignment - && ass.state != AssignmentState::Submitted - && ass.state != AssignmentState::Inconclusive - }) || invalid_assignments.iter().any(|assignment| { - assignments.iter().any(|ass| { - ass.id == *assignment - && ass.state != AssignmentState::Submitted - && ass.state != AssignmentState::Inconclusive - }) - }) - }) { - Err(AppError::Specific( - ValidateSubmitError::StateTransitionForbidden, - ))? + let valid_assignments = &valid_groups[&0]; + let mut invalid_assignments = Vec::new(); + for invalid_assignment_ids in invalid_groups.values() { + for invalid_assignment_id in invalid_assignment_ids { + invalid_assignments.push(*invalid_assignment_id); + } } if task.canonical_result_id.is_none() { // If we don't have a valid result yet + + let canonical_result = sqlx::query_as_unchecked!( + clusterizer_common::records::Result, + r#" + SELECT + * + FROM + results r + WHERE + r.assignment_id = ANY($1) + ORDER BY + r.created_at ASC + LIMIT 1 + "#, + valid_assignments + ) + .fetch_one(&state.pool) + .await?; + sqlx::query_unchecked!( r#" UPDATE tasks SET - canonical_result_id = - ( - SELECT - r.id - FROM - results r - JOIN assignments a - ON a.id = r.assignment_id - WHERE a.id = ANY($2) - ORDER BY - r.created_at DESC - LIMIT 1 - ) + canonical_result_id = $2 WHERE id = $1 "#, task.id, - valid_assignments + canonical_result.id ) .execute(&state.pool) .await?; } - set_assignment_state::set_assignment_state( - valid_assignments.as_slice(), - AssignmentState::Valid, - ) - .execute(&state.pool) - .await - .map_not_found(ValidateSubmitError::InvalidAssignment)?; + set_assignment_state::set_assignment_state(valid_assignments, AssignmentState::Valid) + .execute(&state.pool) + .await?; // Mark invalid set_assignment_state::set_assignment_state( @@ -179,8 +153,8 @@ pub async fn validate_submit( .execute(&state.pool) .await?; } - if valid_groups.is_empty() && !invalid_groups.is_empty() { - // No groups had a count at least equal to quorum + if valid_groups.len() > 1 || !invalid_groups.is_empty() { + // Either no groups met quorum, or more than one did and we need to break the tie. // Inconclusive // Cannot be inconclusive if a result is canonical already. Either it's valid or it's not. @@ -189,45 +163,66 @@ pub async fn validate_submit( ValidateSubmitError::InconsistentValidationState, ))? } - - let inconclusive_assignments: Vec> = task_assignments - .iter() - .filter(|assignment| { - invalid_groups - .iter() - .any(|group_num| *group_num == request.assignments[assignment].unwrap()) - }) - .copied() - .collect(); - - let mut max_count = 0; - for (_, count) in group_map { - if count > max_count { - max_count = count; - } - } - sqlx::query_unchecked!( - r#" - UPDATE tasks - SET assignments_needed = assignments_needed + $2 - WHERE - id = $1 - "#, - task.id, - task.quorum - (errored_assignments.len() as i32 + max_count) - ) - .execute(&state.pool) - .await?; - - // Mark assignments as inconclusive - set_assignment_state(&inconclusive_assignments, AssignmentState::Inconclusive) + // If there are two or more valid groups, submit another assignment and hope that breaks the tie. 
+ if valid_groups.len() > 1 { + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + assignments_needed = assignments_needed + 1 + WHERE + id = $1 + "#, + task.id, + ) .execute(&state.pool) .await?; + for (_, inconclusive_assignments) in valid_groups { + // Mark assignments as inconclusive + set_assignment_state(&inconclusive_assignments, AssignmentState::Inconclusive) + .execute(&state.pool) + .await?; + } + } else { + let mut invalid_assignments = Vec::new(); + for (_, invalid_assignment_ids) in invalid_groups { + for invalid_assignment_id in invalid_assignment_ids { + invalid_assignments.push(invalid_assignment_id); + } + } - //Mark assignments as errored - set_assignment_state(&errored_assignments, AssignmentState::Error) + let mut max_count = 0; + for (_, assignments) in group_map { + if assignments.len() as i32 > max_count { + max_count = assignments.len() as i32; + } + } + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + assignments_needed = assignments_needed + $2 + WHERE + id = $1 + "#, + task.id, + task.quorum - max_count + ) .execute(&state.pool) .await?; + + // Mark assignments as inconclusive + set_assignment_state(&invalid_assignments, AssignmentState::Inconclusive) + .execute(&state.pool) + .await?; + + // Mark assignments as errored + set_assignment_state(&errored_assignments, AssignmentState::Error) + .execute(&state.pool) + .await?; + } } } From 5eb990197b2b1b8edf28ec25c7d4d1279720018f Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 4 Aug 2025 22:31:55 -0700 Subject: [PATCH 06/29] Big refactor to final design as discussed in VC in June --- common/src/errors/validate_submit_err.rs | 10 +- common/src/records/result.rs | 1 + .../src/requests/validate_submit_request.rs | 3 +- ...f94f224b083c68ff691b6bbeffdb171a83a47.json | 15 - ...31e6b9e29f27da54f8ed525dc71e8f3162a0a.json | 6 + ...64fab1d44214a7f136306282a38c25536b22f.json | 14 + ...f388ef07ce490f99c4a9127e30153eb840824.json | 15 - ...1a5f19cd315d0a1881afb695861f4221cf4ca.json | 14 - ...ffd170787045c3233daad045204b9aa53f66c.json | 6 + ...bf6e50cca4edd9b9c2b475201cc208720a2d.json} | 4 +- ...649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json | 15 + ...77cd9a47553e3637302bf590b651e161415f6.json | 15 + ...ee527847ded0826f42467a48fa81503c3d5f.json} | 10 +- server/migrations/20250426220809_init.sql | 3 +- server/src/main.rs | 2 +- server/src/routes/validate_fetch.rs | 34 +- server/src/routes/validate_submit.rs | 329 ++++++++++-------- 17 files changed, 275 insertions(+), 221 deletions(-) delete mode 100644 server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json create mode 100644 server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json delete mode 100644 server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json delete mode 100644 server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json rename server/.sqlx/{query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json => query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json} (63%) create mode 100644 server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json create mode 100644 server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json rename server/.sqlx/{query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json => query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json} (66%) diff --git 
a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 6f18ec4..75edca7 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -7,10 +7,12 @@ pub enum ValidateSubmitError { InvalidAssignment, #[error("task already validated and this result is not valid")] InconsistentValidationState, - #[error("multi-task validation in a single request is currently not implemented")] - MultipleTasksDisallowed, - #[error("too many groups meeting quorum were provided")] - ValidityAmbiguous, + #[error("all results are inconclusive, and no new assignment has finished to solve it")] + ValidationImpossibleError, + #[error("validation group contained assignments belonging to multiple tasks")] + ValidationGroupTaskInconsistency, + #[error("assignments referred to by group id cannot refer to an assignment other than itself")] + ValidationGroupAssociationInconsistency, #[error("state transition forbidden")] StateTransitionForbidden, } diff --git a/common/src/records/result.rs b/common/src/records/result.rs index c32e590..d4b2391 100644 --- a/common/src/records/result.rs +++ b/common/src/records/result.rs @@ -13,6 +13,7 @@ pub struct Result { pub stdout: String, pub stderr: String, pub exit_code: Option, + pub group_assignment_id: Option> } #[non_exhaustive] diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs index 13db817..ce1ebcc 100644 --- a/common/src/requests/validate_submit_request.rs +++ b/common/src/requests/validate_submit_request.rs @@ -6,5 +6,6 @@ use crate::{records::Assignment, types::Id}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ValidateSubmitRequest { - pub assignments: HashMap, Option>, + //First id is the assignment id that will change state, second is the "group id" it belongs with + pub assignments: HashMap, Id>, } diff --git a/server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json b/server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json deleted file mode 100644 index 01acf58..0000000 --- a/server/.sqlx/query-237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n tasks\n SET \n assignments_needed = assignments_needed + $2\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "237bcc75f090ec14f8703b28013f94f224b083c68ff691b6bbeffdb171a83a47" -} diff --git a/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json b/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json index 1c1d7db..1755ff7 100644 --- a/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json +++ b/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json @@ -32,6 +32,11 @@ "ordinal": 5, "name": "exit_code", "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "group_assignment_id", + "type_info": "Int8" } ], "parameters": { @@ -45,6 +50,7 @@ false, false, false, + true, true ] }, diff --git a/server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json b/server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json new file mode 100644 index 0000000..18675b5 --- /dev/null +++ 
b/server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n tasks\n SET\n assignments_needed = assignments_needed + 1\n WHERE\n id = $1\n \n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f" +} diff --git a/server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json b/server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json deleted file mode 100644 index 3fea3bc..0000000 --- a/server/.sqlx/query-83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = $2\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "83491616ec19de1b1ac1a01dc64f388ef07ce490f99c4a9127e30153eb840824" -} diff --git a/server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json b/server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json deleted file mode 100644 index a91e0e5..0000000 --- a/server/.sqlx/query-86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n tasks\n SET \n assignments_needed = assignments_needed + 1\n WHERE\n id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "86323f9f5243c3f94a679b49c361a5f19cd315d0a1881afb695861f4221cf4ca" -} diff --git a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json b/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json index 7dbd4cf..49254fd 100644 --- a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json +++ b/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json @@ -32,6 +32,11 @@ "ordinal": 5, "name": "exit_code", "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "group_assignment_id", + "type_info": "Int8" } ], "parameters": { @@ -45,6 +50,7 @@ false, false, false, + true, true ] }, diff --git a/server/.sqlx/query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json b/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json similarity index 63% rename from server/.sqlx/query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json rename to server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json index 7362358..1b6cc1e 100644 --- a/server/.sqlx/query-10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf.json +++ b/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'init', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'init', 'expired')\n GROUP 
BY\n t.id\n HAVING\n t.project_id = $1\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", "describe": { "columns": [ { @@ -60,5 +60,5 @@ false ] }, - "hash": "10780f6fbc25a26cb46dec7bb5d47b9f00627ee7fdbc36d923db1cdf7f06aedf" + "hash": "9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d" } diff --git a/server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json b/server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json new file mode 100644 index 0000000..d7425f4 --- /dev/null +++ b/server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n tasks\n SET\n canonical_result_id = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc" +} diff --git a/server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json b/server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json new file mode 100644 index 0000000..1d968c6 --- /dev/null +++ b/server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n tasks\n SET\n canonical_result_id = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6" +} diff --git a/server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json b/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json similarity index 66% rename from server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json rename to server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json index 9d2afd0..6a37b38 100644 --- a/server/.sqlx/query-a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187.json +++ b/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT \n *\n FROM \n results r\n WHERE \n r.assignment_id = ANY($1)\n ORDER BY \n r.created_at ASC\n LIMIT 1\n ", + "query": "\n SELECT\n *\n FROM\n results\n WHERE\n assignment_id = ANY($1)\n ", "describe": { "columns": [ { @@ -32,6 +32,11 @@ "ordinal": 5, "name": "exit_code", "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "group_assignment_id", + "type_info": "Int8" } ], "parameters": { @@ -45,8 +50,9 @@ false, false, false, + true, true ] }, - "hash": "a6f03cff43feaa8d19fdf3f010741deed46496723b2ee94260ec1721cf8e8187" + "hash": "e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f" } diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index 768eab3..f40535a 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -73,7 +73,8 @@ CREATE TABLE results ( assignment_id int8 NOT NULL UNIQUE REFERENCES assignments(id) ON DELETE RESTRICT ON UPDATE RESTRICT, stdout text NOT NULL, stderr text NOT NULL, - exit_code int4 + exit_code int4, + group_assignment_id int8 REFERENCES assignments(id) ON DELETE RESTRICT ON UPDATE RESTRICT ); CREATE FUNCTION 
trigger_function_tasks_remove_assignment_user_id() diff --git a/server/src/main.rs b/server/src/main.rs index 226cc75..7cba180 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -14,7 +14,7 @@ use clusterizer_common::records::{ Assignment, Platform, Project, ProjectVersion, Result, Task, User, }; -use routes::*; +use routes::{get_all, get_one}; use sqlx::PgPool; use state::AppState; use tokio::{net::TcpListener, time}; diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index 9817b3e..d640743 100644 --- a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -36,23 +36,23 @@ pub async fn validate_fetch( let tasks = sqlx::query_as_unchecked!( Task, r#" - SELECT - t.* - FROM - tasks t - JOIN assignments a ON - a.task_id = t.id - WHERE - a.state not in ('canceled', 'init', 'expired') - GROUP BY - t.id - HAVING - t.project_id = $1 - AND ( - count(a.id) >= t.assignments_needed - OR t.canonical_result_id IS NOT NULL - ) - "#, + SELECT + t.* + FROM + tasks t + JOIN assignments a ON + a.task_id = t.id + WHERE + a.state not in ('canceled', 'init', 'expired') + GROUP BY + t.id + HAVING + t.project_id = $1 + AND ( + count(a.id) >= t.assignments_needed + OR t.canonical_result_id IS NOT NULL + ) + "#, project.id ) .fetch_all(&state.pool) diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index b0654ba..684f54c 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -1,12 +1,12 @@ use axum::{Json, extract::State}; use clusterizer_common::{ errors::ValidateSubmitError, - records::{Assignment, Task}, + records::{Assignment, Result, Task}, requests::ValidateSubmitRequest, types::{AssignmentState, Id}, }; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use crate::{ result::{AppError, AppResult}, @@ -39,6 +39,9 @@ pub async fn validate_submit( Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))? } + let assignment_by_id: HashMap, &Assignment> = + assignments.iter().map(|ass| (ass.id, ass)).collect(); + // Disallow state transitions via validation unless the assignment is one of these states if assignments.iter().any(|assignment| { assignment.state != AssignmentState::Submitted @@ -48,183 +51,211 @@ pub async fn validate_submit( ValidateSubmitError::StateTransitionForbidden, ))? } + /* + 1. Obtain unique group number from group number, split request into assignment_ids split by their group number + 2. Error if assignments for different tasks were given the same group number + 3. Error if assignments for the same task were given different group ids but those group ids are the other assignment + { "1": 2, "2": 1 } + Without changing anything it should just think quorum hasn't been met. + The special case is that it's a circular reference. + Check that + Per group processing: + 1. Determine which assignment within a group submitted a result first, that's the potential canonical result + 2. Determine if enough results are within that group to meet quorum + a. if there are + 1. Mark the assignments as valid. + 2. Set task.canonical_result = the result determined in #1 + 3. Set all other assignments for that task that are in different groups to invalid + b. if there are not + 1. Set all assignments to inconclusive + 2. 
Increment assignments_needed by 1 - // Generate a map of tasks to assignments with that task_id for use in the next loop - let mut task_assignment_map: HashMap, Vec>> = HashMap::new(); - for assignment in &assignments { - task_assignment_map - .entry(assignment.task_id) - .or_default() - .push(assignment.id); - } - if task_assignment_map.keys().len() > 1 { - Err(AppError::Specific( - ValidateSubmitError::MultipleTasksDisallowed, - ))? - } - // Loop through all tasks that have been submitted for validation via their assignments - // This avoids a MixedTasks error as we simply error with "too few results" instead for that particular task - for (task_id, task_assignments) in &task_assignment_map { - let task = Task::select_one(*task_id).fetch_one(&state.pool).await?; - let mut group_map: HashMap>> = HashMap::new(); - let mut errored_assignments: Vec> = Vec::new(); + */ - // Get group number and count of assignments in each group - for assignment in task_assignments { - let group_num_option = request.assignments[assignment]; - - match group_num_option { - // Task has a group number, so use it. - Some(group_num) => group_map.entry(group_num).or_default().push(*assignment), - // Task errored, so add one to assignments needed - None => errored_assignments.push(*assignment), + // 1. Start with assignment_id to group_number hashmap + // Need to create a vec of group_number and dedup it + let mut group_ids: Vec> = request.assignments.values().copied().collect(); + group_ids.sort(); + group_ids.dedup(); + //Create inverse of request - group_id first, then vec of assignment_ids + let mut group_assignment_map: HashMap, Vec>> = HashMap::new(); + for group_id in group_ids { + group_assignment_map.entry(group_id).or_default().extend( + request + .assignments + .iter() + .filter(|x| *x.1 == group_id) + .map(|x| *x.1), + ); + //Error checking + let mut task_unique: HashSet> = HashSet::new(); + for assignment_id in &group_assignment_map[&group_id] { + if let Some(a) = assignments.iter().find(|a| &a.id == assignment_id) { + task_unique.insert(a.task_id); + } + if request.assignments[&request.assignments[assignment_id]] + != request.assignments[assignment_id] + { + Err(AppError::Specific( + ValidateSubmitError::ValidationGroupAssociationInconsistency, + ))? } } + if task_unique.len() > 1 { + Err(AppError::Specific( + ValidateSubmitError::ValidationGroupTaskInconsistency, + ))? + } + let task: Task = Task::select_one( + task_unique + .iter() + .next() + .copied() // or `.cloned()` if Id isn't Copy + .expect("task_unique must contain exactly one task_id"), + ) + .fetch_one(&state.pool) + .await?; + //Are there enough for quorum + if (group_assignment_map[&group_id].len() as i32) < task.quorum { + // Inconclusive + let ids = &group_assignment_map[&group_id]; + let by_id: HashMap, AssignmentState> = + assignments.iter().map(|a| (a.id, a.state)).collect(); + if ids.iter().all(|&aid| { + matches!( + by_id.get(&aid).copied().expect("assignment id must exist"), + AssignmentState::Inconclusive + ) + }) { + // Cannot run the inconclusive part if we've already submitted a new one. + Err(AppError::Specific( + ValidateSubmitError::ValidationImpossibleError, + ))? 
+ } + set_assignment_state( + &group_assignment_map[&group_id], + AssignmentState::Inconclusive, + ) + .execute(&state.pool) + .await?; + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + assignments_needed = assignments_needed + 1 + WHERE + id = $1 + + "#, + task.id + ) + .execute(&state.pool) + .await?; - let valid_groups: HashMap>> = group_map + break; + } + // There are enough for quorum + // Get assignments for our task_id, regardless of group + let task_assignments: Vec = assignments .iter() - .filter(|(_, assignments)| assignments.len() as i32 >= task.quorum) - .map(|(group_num, assignments)| (*group_num, assignments.clone())) + .filter(|ass| ass.task_id == task.id) + .cloned() .collect(); - - let invalid_groups: HashMap>> = group_map + // Get their Ids + let task_assignment_ids: Vec> = + task_assignments.iter().map(|ass| ass.id).collect(); + // Use their ids to get the results for each + let task_results: Vec = sqlx::query_as_unchecked!( + Result, + r#" + SELECT + * + FROM + results + WHERE + assignment_id = ANY($1) + "#, + &task_assignment_ids + ) + .fetch_all(&state.pool) + .await?; + // Get results only for this group + let group_results: Vec = task_results .iter() - .filter(|(_, assignments)| (assignments.len() as i32) < task.quorum) - .map(|(group_num, assignments)| (*group_num, assignments.clone())) + .filter(|res| group_assignment_map[&group_id].contains(&res.assignment_id)) + .cloned() .collect(); + // Get the earliest submitted result within that group + let earliest_group_result = group_results + .iter() + .min_by_key(|r| r.created_at) + .cloned() + .expect("this should not be hit since we're only grabbing the min"); + let mut relevant_task_results: Vec<&Result> = Vec::new(); - if valid_groups.len() == 1 || task.canonical_result_id.is_some() { - // Valid - let valid_assignments = &valid_groups[&0]; - let mut invalid_assignments = Vec::new(); - for invalid_assignment_ids in invalid_groups.values() { - for invalid_assignment_id in invalid_assignment_ids { - invalid_assignments.push(*invalid_assignment_id); + // iterate over task results to find other groups with the same task + for res in task_results.iter() { + if let Some(ass) = assignment_by_id.get(&res.assignment_id) { + // 1. Its actual group_id is not the current_group_id + // 2. 
It's associated with the target task.id + if ass.id != group_id && ass.task_id == task.id { + relevant_task_results.push(res); } } + } - if task.canonical_result_id.is_none() { - // If we don't have a valid result yet - - let canonical_result = sqlx::query_as_unchecked!( - clusterizer_common::records::Result, - r#" - SELECT - * - FROM - results r - WHERE - r.assignment_id = ANY($1) - ORDER BY - r.created_at ASC - LIMIT 1 - "#, - valid_assignments - ) - .fetch_one(&state.pool) - .await?; - + let earliest_relevant_result: Option<&Result> = relevant_task_results + .iter() + .min_by_key(|r| r.created_at) + .copied(); + if let Some(earliest_result) = earliest_relevant_result { + // There is another result for another group for this task + if earliest_group_result.created_at <= earliest_result.created_at { + // This can be canonical + // Set group to valid + set_assignment_state(&group_assignment_map[&group_id], AssignmentState::Valid) + .execute(&state.pool) + .await?; + // Set task's canonical result id sqlx::query_unchecked!( r#" - UPDATE + UPDATE tasks - SET - canonical_result_id = $2 + SET + canonical_result_id = $1 WHERE - id = $1 + id = $2 "#, - task.id, - canonical_result.id + earliest_group_result.id, + task.id ) .execute(&state.pool) .await?; } - set_assignment_state::set_assignment_state(valid_assignments, AssignmentState::Valid) + } else { + // This is the only relevant group, set to valid + set_assignment_state(&group_assignment_map[&group_id], AssignmentState::Valid) .execute(&state.pool) .await?; - - // Mark invalid - set_assignment_state::set_assignment_state( - &invalid_assignments, - AssignmentState::Invalid, + // Set task's canonical result id + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + canonical_result_id = $1 + WHERE + id = $2 + "#, + earliest_group_result.id, + task.id ) .execute(&state.pool) .await?; } - if valid_groups.len() > 1 || !invalid_groups.is_empty() { - // Either no groups met quorum, or more than one did and we need to break the tie. - // Inconclusive - - // Cannot be inconclusive if a result is canonical already. Either it's valid or it's not. - if task.canonical_result_id.is_some() { - Err(AppError::Specific( - ValidateSubmitError::InconsistentValidationState, - ))? - } - // If there are two or more valid groups, submit another assignment and hope that breaks the tie. 
- if valid_groups.len() > 1 { - sqlx::query_unchecked!( - r#" - UPDATE - tasks - SET - assignments_needed = assignments_needed + 1 - WHERE - id = $1 - "#, - task.id, - ) - .execute(&state.pool) - .await?; - for (_, inconclusive_assignments) in valid_groups { - // Mark assignments as inconclusive - set_assignment_state(&inconclusive_assignments, AssignmentState::Inconclusive) - .execute(&state.pool) - .await?; - } - } else { - let mut invalid_assignments = Vec::new(); - for (_, invalid_assignment_ids) in invalid_groups { - for invalid_assignment_id in invalid_assignment_ids { - invalid_assignments.push(invalid_assignment_id); - } - } - - let mut max_count = 0; - for (_, assignments) in group_map { - if assignments.len() as i32 > max_count { - max_count = assignments.len() as i32; - } - } - sqlx::query_unchecked!( - r#" - UPDATE - tasks - SET - assignments_needed = assignments_needed + $2 - WHERE - id = $1 - "#, - task.id, - task.quorum - max_count - ) - .execute(&state.pool) - .await?; - - // Mark assignments as inconclusive - set_assignment_state(&invalid_assignments, AssignmentState::Inconclusive) - .execute(&state.pool) - .await?; - - // Mark assignments as errored - set_assignment_state(&errored_assignments, AssignmentState::Error) - .execute(&state.pool) - .await?; - } - } } - Ok(()) } From ba72e02053a57d5bcf42878a847ca075ef44ca41 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 4 Aug 2025 22:41:10 -0700 Subject: [PATCH 07/29] Rebased for sqlx stuff, fixed comments on route --- ...804d430966e8ae83a168de0d3444bb8a4c7b1051.json | 6 ++++-- ...308b16b87bf12743e25be5d1707c07edd8d94dfd.json | 6 ++++-- ...35c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json | 10 ++++++++-- ...2bf7c47896571f1b0b9b844e84e298f87e3ed09b.json | 6 ++++-- ...05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json | 16 +++++++++++----- server/src/routes/validate_submit.rs | 2 +- 6 files changed, 32 insertions(+), 14 deletions(-) diff --git a/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json b/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json index 48ce028..eb6a480 100644 --- a/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json +++ b/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json @@ -39,12 +39,12 @@ "type_info": "Int8Array" }, { - "ordinal": 6, + "ordinal": 7, "name": "canonical_result_id", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "quorum", "type_info": "Int4" } @@ -61,6 +61,8 @@ false, false, false, + false, + true, false ] }, diff --git a/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json b/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json index 267f09d..a86be38 100644 --- a/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json +++ b/server/.sqlx/query-4f28c9855a87500c39fc4e88308b16b87bf12743e25be5d1707c07edd8d94dfd.json @@ -39,12 +39,12 @@ "type_info": "Int8Array" }, { - "ordinal": 6, + "ordinal": 7, "name": "canonical_result_id", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "quorum", "type_info": "Int4" } @@ -61,6 +61,8 @@ false, false, false, + false, + true, false ] }, diff --git a/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json b/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json index 9fa1e26..d0d8db8 100644 --- 
a/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json +++ b/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json @@ -15,16 +15,21 @@ }, { "ordinal": 2, + "name": "deadline_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, "name": "task_id", "type_info": "Int8" }, { - "ordinal": 3, + "ordinal": 4, "name": "user_id", "type_info": "Int8" }, { - "ordinal": 4, + "ordinal": 5, "name": "state", "type_info": { "Custom": { @@ -55,6 +60,7 @@ false, false, false, + false, false ] }, diff --git a/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json b/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json index 2788e90..d190667 100644 --- a/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json +++ b/server/.sqlx/query-8660893ff85be731039fb2402bf7c47896571f1b0b9b844e84e298f87e3ed09b.json @@ -39,12 +39,12 @@ "type_info": "Int8Array" }, { - "ordinal": 6, + "ordinal": 7, "name": "canonical_result_id", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "quorum", "type_info": "Int4" } @@ -63,6 +63,8 @@ false, false, false, + false, + true, false ] }, diff --git a/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json b/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json index 1b6cc1e..bc9e7e2 100644 --- a/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json +++ b/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json @@ -15,31 +15,36 @@ }, { "ordinal": 2, + "name": "deadline", + "type_info": "Interval" + }, + { + "ordinal": 3, "name": "project_id", "type_info": "Int8" }, { - "ordinal": 3, + "ordinal": 4, "name": "stdin", "type_info": "Text" }, { - "ordinal": 4, + "ordinal": 5, "name": "assignments_needed", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 6, "name": "assignment_user_ids", "type_info": "Int8Array" }, { - "ordinal": 6, + "ordinal": 7, "name": "canonical_result_id", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "quorum", "type_info": "Int4" } @@ -56,6 +61,7 @@ false, false, false, + false, true, false ] diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 684f54c..1c7c54f 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -117,7 +117,7 @@ pub async fn validate_submit( ) .fetch_one(&state.pool) .await?; - //Are there enough for quorum + // Are there enough for quorum if (group_assignment_map[&group_id].len() as i32) < task.quorum { // Inconclusive let ids = &group_assignment_map[&group_id]; From 241fd6685cba6697006a8f95f795a3b454689ffe Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 4 Aug 2025 22:47:59 -0700 Subject: [PATCH 08/29] Minor fix for cargo fmt --- common/src/records/result.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/src/records/result.rs b/common/src/records/result.rs index d4b2391..4f4a7b4 100644 --- a/common/src/records/result.rs +++ b/common/src/records/result.rs @@ -13,7 +13,7 @@ pub struct Result { pub stdout: String, pub stderr: String, pub exit_code: Option, - pub group_assignment_id: Option> + pub group_assignment_id: Option>, } #[non_exhaustive] From 27a22e550819f4f74e14b59c9bcecae8deeadf78 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Tue, 5 Aug 2025 15:05:25 -0700 Subject: [PATCH 09/29] Add 
ability for validator to report errors --- .../src/requests/validate_submit_request.rs | 2 +- server/src/routes/validate_submit.rs | 52 +++++++++++++++---- 2 files changed, 42 insertions(+), 12 deletions(-) diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs index ce1ebcc..67f601a 100644 --- a/common/src/requests/validate_submit_request.rs +++ b/common/src/requests/validate_submit_request.rs @@ -7,5 +7,5 @@ use crate::{records::Assignment, types::Id}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ValidateSubmitRequest { //First id is the assignment id that will change state, second is the "group id" it belongs with - pub assignments: HashMap, Id>, + pub assignments: HashMap, Option>>, } diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 1c7c54f..b00eb8e 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -18,7 +18,40 @@ pub async fn validate_submit( State(state): State, Json(request): Json, ) -> AppResult<(), ValidateSubmitError> { - let assignment_ids: Vec<_> = request.assignments.keys().copied().collect(); + let mut group_ids: Vec> = Vec::new(); + let mut assignment_ids: Vec> = Vec::new(); + let mut group_id_by_assignment: HashMap, Id> = HashMap::new(); + // The purpose of doing it this way is to only add assignments to assignment_ids if they had a group number. That way we can know that any assignment in assignments (or assignment_ids, or assignment_by_id) did not error. + for (ass, g_id) in request.assignments { + match g_id { + Some(g) => { + // Add group id for that assignment to group_ids + // Add assignment id to assignment_ids + // Add assignment id and group id to new HashMap which filters out errored results + group_ids.push(g); + assignment_ids.push(ass); + group_id_by_assignment.insert(ass, g); + } + None => { + // Validation error + // Confirm the assignment exists at all before attempting to set its value + let err_assignment = Assignment::select_one(ass).fetch_one(&state.pool).await; + match err_assignment { + Ok(_) => { + set_assignment_state(&[ass], AssignmentState::Error) + .execute(&state.pool) + .await?; + } + Err(_) => Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))?, + } + set_assignment_state(&[ass], AssignmentState::Error) + .execute(&state.pool) + .await?; + } + } + } + group_ids.sort(); + group_ids.dedup(); let assignments = sqlx::query_as_unchecked!( Assignment, @@ -76,27 +109,24 @@ pub async fn validate_submit( // 1. 
Start with assignment_id to group_number hashmap // Need to create a vec of group_number and dedup it - let mut group_ids: Vec> = request.assignments.values().copied().collect(); - group_ids.sort(); - group_ids.dedup(); + //Create inverse of request - group_id first, then vec of assignment_ids let mut group_assignment_map: HashMap, Vec>> = HashMap::new(); for group_id in group_ids { group_assignment_map.entry(group_id).or_default().extend( - request - .assignments + group_id_by_assignment .iter() - .filter(|x| *x.1 == group_id) - .map(|x| *x.1), + .filter(|(_, g_id)| **g_id == group_id) + .map(|(_, g_id)| *g_id), ); - //Error checking + // Error checking let mut task_unique: HashSet> = HashSet::new(); for assignment_id in &group_assignment_map[&group_id] { if let Some(a) = assignments.iter().find(|a| &a.id == assignment_id) { task_unique.insert(a.task_id); } - if request.assignments[&request.assignments[assignment_id]] - != request.assignments[assignment_id] + if group_id_by_assignment[&group_id_by_assignment[assignment_id]] + != group_id_by_assignment[assignment_id] { Err(AppError::Specific( ValidateSubmitError::ValidationGroupAssociationInconsistency, From c9c733267a3a79041d3c0349889429ea6d5a0564 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Tue, 5 Aug 2025 15:19:38 -0700 Subject: [PATCH 10/29] Refactored a hashmap away since we never used it --- server/src/routes/validate_submit.rs | 59 +++++++++++++--------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index b00eb8e..1b4966c 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -33,8 +33,12 @@ pub async fn validate_submit( group_id_by_assignment.insert(ass, g); } None => { - // Validation error + // Validation error (Not invalid) // Confirm the assignment exists at all before attempting to set its value + // I don't think we should increment assignments_needed in this case because this alerts us to a fundamental problem with the data we tried to validate + // We should investigate why that happened, not just run another through. + // If the error is transient and the other result was able to run through the validator, it'll increment it anyway at quorum higher than 1. + let err_assignment = Assignment::select_one(ass).fetch_one(&state.pool).await; match err_assignment { Ok(_) => { @@ -106,27 +110,20 @@ pub async fn validate_submit( */ - - // 1. 
Start with assignment_id to group_number hashmap - // Need to create a vec of group_number and dedup it - - //Create inverse of request - group_id first, then vec of assignment_ids - let mut group_assignment_map: HashMap, Vec>> = HashMap::new(); for group_id in group_ids { - group_assignment_map.entry(group_id).or_default().extend( - group_id_by_assignment - .iter() - .filter(|(_, g_id)| **g_id == group_id) - .map(|(_, g_id)| *g_id), - ); + let group_assignments: Vec> = group_id_by_assignment + .iter() + .filter(|(_, g_id)| **g_id == group_id) + .map(|(_, g_id)| *g_id) + .collect(); // Error checking let mut task_unique: HashSet> = HashSet::new(); - for assignment_id in &group_assignment_map[&group_id] { - if let Some(a) = assignments.iter().find(|a| &a.id == assignment_id) { + for assignment_id in group_assignments.clone() { + if let Some(a) = assignments.iter().find(|a| a.id == assignment_id) { task_unique.insert(a.task_id); } - if group_id_by_assignment[&group_id_by_assignment[assignment_id]] - != group_id_by_assignment[assignment_id] + if group_id_by_assignment[&group_id_by_assignment[&assignment_id]] + != group_id_by_assignment[&assignment_id] { Err(AppError::Specific( ValidateSubmitError::ValidationGroupAssociationInconsistency, @@ -134,6 +131,7 @@ pub async fn validate_submit( } } if task_unique.len() > 1 { + // Cannot validate assignments cross-task. Only within the same task. Err(AppError::Specific( ValidateSubmitError::ValidationGroupTaskInconsistency, ))? @@ -148,14 +146,16 @@ pub async fn validate_submit( .fetch_one(&state.pool) .await?; // Are there enough for quorum - if (group_assignment_map[&group_id].len() as i32) < task.quorum { + if (group_assignments.clone().len() as i32) < task.quorum { // Inconclusive - let ids = &group_assignment_map[&group_id]; - let by_id: HashMap, AssignmentState> = + let assignment_state_by_id: HashMap, AssignmentState> = assignments.iter().map(|a| (a.id, a.state)).collect(); - if ids.iter().all(|&aid| { + if group_assignments.iter().all(|&aid| { matches!( - by_id.get(&aid).copied().expect("assignment id must exist"), + assignment_state_by_id + .get(&aid) + .copied() + .expect("assignment id must exist"), AssignmentState::Inconclusive ) }) { @@ -164,12 +164,9 @@ pub async fn validate_submit( ValidateSubmitError::ValidationImpossibleError, ))? 
} - set_assignment_state( - &group_assignment_map[&group_id], - AssignmentState::Inconclusive, - ) - .execute(&state.pool) - .await?; + set_assignment_state(&group_assignments, AssignmentState::Inconclusive) + .execute(&state.pool) + .await?; sqlx::query_unchecked!( r#" UPDATE @@ -215,7 +212,7 @@ pub async fn validate_submit( // Get results only for this group let group_results: Vec = task_results .iter() - .filter(|res| group_assignment_map[&group_id].contains(&res.assignment_id)) + .filter(|res| group_assignments.contains(&res.assignment_id)) .cloned() .collect(); // Get the earliest submitted result within that group @@ -246,7 +243,7 @@ pub async fn validate_submit( if earliest_group_result.created_at <= earliest_result.created_at { // This can be canonical // Set group to valid - set_assignment_state(&group_assignment_map[&group_id], AssignmentState::Valid) + set_assignment_state(&group_assignments, AssignmentState::Valid) .execute(&state.pool) .await?; // Set task's canonical result id @@ -267,7 +264,7 @@ pub async fn validate_submit( } } else { // This is the only relevant group, set to valid - set_assignment_state(&group_assignment_map[&group_id], AssignmentState::Valid) + set_assignment_state(&group_assignments, AssignmentState::Valid) .execute(&state.pool) .await?; // Set task's canonical result id From 43234d1702de90628276fe60a40df6280659ddfd Mon Sep 17 00:00:00 2001 From: BoySanic Date: Tue, 5 Aug 2025 16:31:18 -0700 Subject: [PATCH 11/29] Add use of group_assignment_id in result, create method to validate against canonical result --- common/src/errors/validate_submit_err.rs | 2 + ...af4801a86cf6a31db67e4531e677aeea5f1e9.json | 58 ++++++++++++ ...c378da1246c428764a7f45d1f04cedc996f4a.json | 15 ++++ ...7682719facd4993839554627298bc7c208adc.json | 15 ++++ server/src/routes/validate_submit.rs | 90 ++++++++++++++++++- 5 files changed, 179 insertions(+), 1 deletion(-) create mode 100644 server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json create mode 100644 server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json create mode 100644 server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 75edca7..0640b73 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -15,4 +15,6 @@ pub enum ValidateSubmitError { ValidationGroupAssociationInconsistency, #[error("state transition forbidden")] StateTransitionForbidden, + #[error("cannot validate against a result other than the canonical result")] + NonCanonicalResultError, } diff --git a/server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json b/server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json new file mode 100644 index 0000000..d8cd5ca --- /dev/null +++ b/server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM\n results\n WHERE\n id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "assignment_id", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "stdout", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "stderr", + "type_info": "Text" + }, 
+ { + "ordinal": 5, + "name": "exit_code", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "group_assignment_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + true, + true + ] + }, + "hash": "3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9" +} diff --git a/server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json b/server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json new file mode 100644 index 0000000..81ec1ca --- /dev/null +++ b/server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n group_assignment_id = $1\n WHERE\n assignment_id = ANY($2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a" +} diff --git a/server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json b/server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json new file mode 100644 index 0000000..b8fcd8f --- /dev/null +++ b/server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n group_assignment_id = $1\n WHERE\n assignment_id = ANY($2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc" +} diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 1b4966c..f1ee75d 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -150,6 +150,9 @@ pub async fn validate_submit( // Inconclusive let assignment_state_by_id: HashMap, AssignmentState> = assignments.iter().map(|a| (a.id, a.state)).collect(); + // Determine if all of the submitted assignments are in the "Inconclusive" state. + // If they are, it's likely there's another assignment pending that we needed to validate with, but it has not returned yet. + // This indicates we're validating too early on an inconclusive batch. if group_assignments.iter().all(|&aid| { matches!( assignment_state_by_id @@ -181,8 +184,65 @@ pub async fn validate_submit( ) .execute(&state.pool) .await?; - + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_assignment_id = $1 + WHERE + assignment_id = ANY($2) + "#, + group_id, + &group_assignments + ) + .execute(&state.pool) + .await?; + // Exit if inconclusive break; + } else if let Some(canonical_result_id) = task.canonical_result_id { + // We should validate against the canonical result instead. + // Get the result for the group_id (assignment_id) + let canonical_result: Result = sqlx::query_as_unchecked!( + Result, + r#" + SELECT + * + FROM + results + WHERE + id = $1 + "#, + &group_id + ) + .fetch_one(&state.pool) + .await?; + + // Is the current group_id the canonical_result's assignnment_id? + if canonical_result.id != canonical_result_id { + // Error state + Err(AppError::Specific( + ValidateSubmitError::NonCanonicalResultError, + ))? 
+ } + // Validate + set_assignment_state(&group_assignments, AssignmentState::Valid) + .execute(&state.pool) + .await?; + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_assignment_id = $1 + WHERE + assignment_id = ANY($2) + "#, + group_id, + &group_assignments + ) + .execute(&state.pool) + .await?; } // There are enough for quorum // Get assignments for our task_id, regardless of group @@ -261,6 +321,20 @@ pub async fn validate_submit( ) .execute(&state.pool) .await?; + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_assignment_id = $1 + WHERE + assignment_id = ANY($2) + "#, + group_id, + &group_assignments + ) + .execute(&state.pool) + .await?; } } else { // This is the only relevant group, set to valid @@ -282,6 +356,20 @@ pub async fn validate_submit( ) .execute(&state.pool) .await?; + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_assignment_id = $1 + WHERE + assignment_id = ANY($2) + "#, + group_id, + &group_assignments + ) + .execute(&state.pool) + .await?; } } Ok(()) From abeaa2cc5650e021edb15229c8d7d0c686d4c865 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Tue, 5 Aug 2025 16:40:09 -0700 Subject: [PATCH 12/29] Mark invalid, don't error --- common/src/errors/validate_submit_err.rs | 2 -- server/src/routes/validate_submit.rs | 9 +++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 0640b73..75edca7 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -15,6 +15,4 @@ pub enum ValidateSubmitError { ValidationGroupAssociationInconsistency, #[error("state transition forbidden")] StateTransitionForbidden, - #[error("cannot validate against a result other than the canonical result")] - NonCanonicalResultError, } diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index f1ee75d..4036dd9 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -220,10 +220,11 @@ pub async fn validate_submit( // Is the current group_id the canonical_result's assignnment_id? if canonical_result.id != canonical_result_id { - // Error state - Err(AppError::Specific( - ValidateSubmitError::NonCanonicalResultError, - ))? + // Invalid, then. 
+ + set_assignment_state(&group_assignments, AssignmentState::Invalid) + .execute(&state.pool) + .await?; } // Validate set_assignment_state(&group_assignments, AssignmentState::Valid) From b2b3ea571ac2828917cedf217cf96594a6fab1b7 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Tue, 5 Aug 2025 23:16:30 -0700 Subject: [PATCH 13/29] Refactoring again, still in progress --- common/src/errors/validate_submit_err.rs | 2 +- common/src/records/result.rs | 2 +- .../src/requests/validate_submit_request.rs | 6 +- ...b0f4e8bba048af02db76a8e3ed403a2e0c5f.json} | 6 +- ...31e6b9e29f27da54f8ed525dc71e8f3162a0a.json | 2 +- ...64fab1d44214a7f136306282a38c25536b22f.json | 14 - ...1c7c7b237943843b376a581d725c5accaeaf9.json | 15 + ...c378da1246c428764a7f45d1f04cedc996f4a.json | 15 - ...7b1cac2040f70a4973f13f892fcc7592cbf83.json | 15 + ...ffd170787045c3233daad045204b9aa53f66c.json | 2 +- ...7682719facd4993839554627298bc7c208adc.json | 15 - ...649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json | 15 - ...77cd9a47553e3637302bf590b651e161415f6.json | 15 - ...fee527847ded0826f42467a48fa81503c3d5f.json | 2 +- server/migrations/20250426220809_init.sql | 2 +- server/src/routes/validate_submit.rs | 435 ++++++------------ 16 files changed, 185 insertions(+), 378 deletions(-) rename server/.sqlx/{query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json => query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json} (74%) delete mode 100644 server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json create mode 100644 server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json delete mode 100644 server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json create mode 100644 server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json delete mode 100644 server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json delete mode 100644 server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json delete mode 100644 server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 75edca7..a22b12b 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -10,7 +10,7 @@ pub enum ValidateSubmitError { #[error("all results are inconclusive, and no new assignment has finished to solve it")] ValidationImpossibleError, #[error("validation group contained assignments belonging to multiple tasks")] - ValidationGroupTaskInconsistency, + TooManyTasksValidationError, #[error("assignments referred to by group id cannot refer to an assignment other than itself")] ValidationGroupAssociationInconsistency, #[error("state transition forbidden")] diff --git a/common/src/records/result.rs b/common/src/records/result.rs index 4f4a7b4..3d69067 100644 --- a/common/src/records/result.rs +++ b/common/src/records/result.rs @@ -13,7 +13,7 @@ pub struct Result { pub stdout: String, pub stderr: String, pub exit_code: Option, - pub group_assignment_id: Option>, + pub group_result_id: Option>, } #[non_exhaustive] diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs index 67f601a..a54aca8 100644 --- a/common/src/requests/validate_submit_request.rs +++ b/common/src/requests/validate_submit_request.rs @@ -2,10 +2,10 @@ use 
std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::{records::Assignment, types::Id}; +use crate::{records::{Assignment, Result}, types::Id}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ValidateSubmitRequest { - //First id is the assignment id that will change state, second is the "group id" it belongs with - pub assignments: HashMap, Option>>, + // First id is the assignment id that will change state, second is the "group id" it belongs with + pub assignments: HashMap, Option>>, } diff --git a/server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json b/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json similarity index 74% rename from server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json rename to server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json index d8cd5ca..113bf17 100644 --- a/server/.sqlx/query-3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9.json +++ b/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n results\n WHERE\n id = $1\n ", + "query": "\n SELECT\n *\n FROM\n results\n WHERE\n group_result_id = $1\n ", "describe": { "columns": [ { @@ -35,7 +35,7 @@ }, { "ordinal": 6, - "name": "group_assignment_id", + "name": "group_result_id", "type_info": "Int8" } ], @@ -54,5 +54,5 @@ true ] }, - "hash": "3dd9d1f11dbcdecaf9c74aae7e7af4801a86cf6a31db67e4531e677aeea5f1e9" + "hash": "13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f" } diff --git a/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json b/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json index 1755ff7..515813c 100644 --- a/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json +++ b/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json @@ -35,7 +35,7 @@ }, { "ordinal": 6, - "name": "group_assignment_id", + "name": "group_result_id", "type_info": "Int8" } ], diff --git a/server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json b/server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json deleted file mode 100644 index 18675b5..0000000 --- a/server/.sqlx/query-388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n tasks\n SET\n assignments_needed = assignments_needed + 1\n WHERE\n id = $1\n \n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "388759d95b0eab3626a0b06330964fab1d44214a7f136306282a38c25536b22f" -} diff --git a/server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json b/server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json new file mode 100644 index 0000000..7ae9fac --- /dev/null +++ b/server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n results\n SET \n group_result_id = $1\n WHERE \n id = ANY($2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": 
"428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9" +} diff --git a/server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json b/server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json deleted file mode 100644 index 81ec1ca..0000000 --- a/server/.sqlx/query-57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n results\n SET\n group_assignment_id = $1\n WHERE\n assignment_id = ANY($2)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "57cc377f1f18d08a4cfe4dcd6bbc378da1246c428764a7f45d1f04cedc996f4a" -} diff --git a/server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json b/server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json new file mode 100644 index 0000000..00d297e --- /dev/null +++ b/server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = $1\n WHERE \n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83" +} diff --git a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json b/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json index 49254fd..8da719c 100644 --- a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json +++ b/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json @@ -35,7 +35,7 @@ }, { "ordinal": 6, - "name": "group_assignment_id", + "name": "group_result_id", "type_info": "Int8" } ], diff --git a/server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json b/server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json deleted file mode 100644 index b8fcd8f..0000000 --- a/server/.sqlx/query-ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n results\n SET\n group_assignment_id = $1\n WHERE\n assignment_id = ANY($2)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "ac1e8b9eb49f79f1cf5a8cadbd17682719facd4993839554627298bc7c208adc" -} diff --git a/server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json b/server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json deleted file mode 100644 index d7425f4..0000000 --- a/server/.sqlx/query-b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n tasks\n SET\n canonical_result_id = $1\n WHERE\n id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "b82ba3eb5ab066193b65d2d635d649f020fa9d4c4ed8ac9dd0ec0cd93163ccbc" -} diff --git a/server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json b/server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json 
deleted file mode 100644 index 1d968c6..0000000 --- a/server/.sqlx/query-ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n tasks\n SET\n canonical_result_id = $1\n WHERE\n id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "ba285dca9c9d770e00a3e6817fb77cd9a47553e3637302bf590b651e161415f6" -} diff --git a/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json b/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json index 6a37b38..179577d 100644 --- a/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json +++ b/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json @@ -35,7 +35,7 @@ }, { "ordinal": 6, - "name": "group_assignment_id", + "name": "group_result_id", "type_info": "Int8" } ], diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index f40535a..309f7b1 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -74,7 +74,7 @@ CREATE TABLE results ( stdout text NOT NULL, stderr text NOT NULL, exit_code int4, - group_assignment_id int8 REFERENCES assignments(id) ON DELETE RESTRICT ON UPDATE RESTRICT + group_result_id int8 REFERENCES results(id) ON DELETE RESTRICT ON UPDATE RESTRICT ); CREATE FUNCTION trigger_function_tasks_remove_assignment_user_id() diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 4036dd9..2e71f52 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -18,44 +18,25 @@ pub async fn validate_submit( State(state): State, Json(request): Json, ) -> AppResult<(), ValidateSubmitError> { - let mut group_ids: Vec> = Vec::new(); - let mut assignment_ids: Vec> = Vec::new(); - let mut group_id_by_assignment: HashMap, Id> = HashMap::new(); - // The purpose of doing it this way is to only add assignments to assignment_ids if they had a group number. That way we can know that any assignment in assignments (or assignment_ids, or assignment_by_id) did not error. - for (ass, g_id) in request.assignments { - match g_id { - Some(g) => { - // Add group id for that assignment to group_ids - // Add assignment id to assignment_ids - // Add assignment id and group id to new HashMap which filters out errored results - group_ids.push(g); - assignment_ids.push(ass); - group_id_by_assignment.insert(ass, g); - } - None => { - // Validation error (Not invalid) - // Confirm the assignment exists at all before attempting to set its value - // I don't think we should increment assignments_needed in this case because this alerts us to a fundamental problem with the data we tried to validate - // We should investigate why that happened, not just run another through. - // If the error is transient and the other result was able to run through the validator, it'll increment it anyway at quorum higher than 1. + /* + check that there exists no results in the db submitted before the latest given assignment id, this is a subtle issue i just thought of that i think we have not discussed before. + find the valid group, if there is one, the valid group is the group that meets quorum with the earliest submitted result id + if there is no valid group: + 8.1. 
set all assignments that are in any group to 'inconclusive' + 8.2. set assignments_needed to the number of 'inconclusive' and 'error' results plus quorum minus the size of the largest group (this formula still needs to be double-checked; a worked example is sketched further below) + if there is a valid group: + 9.1. set all assignments in that group to 'valid' and all assignments in other groups to 'invalid' + 9.2. set the canonical result id to the earliest result in the group + important to note that when talking about groups, we should always also consider groups that were already in the db + not just the groups that the validator just submitted - + + + */ + let mut group_ids: HashSet> = HashSet::new(); + let mut assignment_ids: Vec> = request.assignments.keys().cloned().collect(); + let mut group_id_by_assignment: HashMap, Id> = HashMap::new(); + let mut assignments_by_group_id: HashMap, Vec>> = HashMap::new(); let assignments = sqlx::query_as_unchecked!( Assignment, r#" SELECT * FROM assignments WHERE id = ANY($1) "#, - assignment_ids + &assignment_ids ) .fetch_all(&state.pool) .await?; - + // Ensure all assignments are real if assignment_ids.len() != assignments.len() { Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))? } - let assignment_by_id: HashMap, &Assignment> = - assignments.iter().map(|ass| (ass.id, ass)).collect(); - - // Disallow state transitions via validation unless the assignment is one of these states - if assignments.iter().any(|assignment| { - assignment.state != AssignmentState::Submitted - && assignment.state != AssignmentState::Inconclusive - }) { + // Ensure all assignments are for the same task + let task_id = assignments[0].task_id; + if assignments.iter().any(|ass| ass.task_id != task_id) { + Err(AppError::Specific( + ValidateSubmitError::TooManyTasksValidationError, + ))? + } + // Disallow state transitions via validation unless the assignment is in the Submitted state + if assignments + .iter() + .any(|assignment| assignment.state != AssignmentState::Submitted) + { Err(AppError::Specific( ValidateSubmitError::StateTransitionForbidden, ))? } - /* - 1. Obtain unique group number from group number, split request into assignment_ids split by their group number - 2. Error if assignments for different tasks were given the same group number - 3. Error if assignments for the same task were given different group ids but those group ids are the other assignment - { "1": 2, "2": 1 } - Without changing anything it should just think quorum hasn't been met. - The special case is that it's a circular reference. - Check that - Per group processing: - 1. Determine which assignment within a group submitted a result first, that's the potential canonical result - 2. Determine if enough results are within that group to meet quorum - a. if there are - 1. Mark the assignments as valid. - 2. Set task.canonical_result = the result determined in #1 - 3. Set all other assignments for that task that are in different groups to invalid - b. if there are not - 1. 
Set all assignments to inconclusive - 2. Increment assignments_needed by 1 - - - */ - for group_id in group_ids { - let group_assignments: Vec> = group_id_by_assignment - .iter() - .filter(|(_, g_id)| **g_id == group_id) - .map(|(_, g_id)| *g_id) - .collect(); - // Error checking - let mut task_unique: HashSet> = HashSet::new(); - for assignment_id in group_assignments.clone() { - if let Some(a) = assignments.iter().find(|a| a.id == assignment_id) { - task_unique.insert(a.task_id); + // Set assignments to Error if they do not have a group_id (aka result_id) + for (ass, group_id) in request.assignments { + match group_id { + Some(g) => { + // Add group id for that assignment to group_ids + // Add assignment id to assignment_ids + // Add assignment id and group id to new HashMap which filters out errored results + assignments_by_group_id + .entry(g) + .or_insert_with(Vec::new) + .push(ass); + group_ids.insert(g); + group_id_by_assignment.insert(ass, g); } - if group_id_by_assignment[&group_id_by_assignment[&assignment_id]] - != group_id_by_assignment[&assignment_id] - { - Err(AppError::Specific( - ValidateSubmitError::ValidationGroupAssociationInconsistency, - ))? + None => { + set_assignment_state(&[ass], AssignmentState::Error) + .execute(&state.pool) + .await?; } } - if task_unique.len() > 1 { - // Cannot validate assignments cross-task. Only within the same task. - Err(AppError::Specific( - ValidateSubmitError::ValidationGroupTaskInconsistency, - ))? - } - let task: Task = Task::select_one( - task_unique - .iter() - .next() - .copied() // or `.cloned()` if Id isn't Copy - .expect("task_unique must contain exactly one task_id"), - ) - .fetch_one(&state.pool) - .await?; - // Are there enough for quorum - if (group_assignments.clone().len() as i32) < task.quorum { - // Inconclusive - let assignment_state_by_id: HashMap, AssignmentState> = - assignments.iter().map(|a| (a.id, a.state)).collect(); - // Determine if all of the submitted assignments are in the "Inconclusive" state. - // If they are, it's likely there's another assignment pending that we needed to validate with, but it has not returned yet. - // This indicates we're validating too early on an inconclusive batch. - if group_assignments.iter().all(|&aid| { - matches!( - assignment_state_by_id - .get(&aid) - .copied() - .expect("assignment id must exist"), - AssignmentState::Inconclusive - ) - }) { - // Cannot run the inconclusive part if we've already submitted a new one. - Err(AppError::Specific( - ValidateSubmitError::ValidationImpossibleError, - ))? 
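The planning comment above (step 8.2) describes how assignments_needed is recomputed when a group cannot reach quorum. A minimal sketch of that arithmetic, assuming the quantities the later patches in this series compute (task.quorum, the size of the largest result group, and the count of inconclusive/error results); the function and parameter names here are illustrative only and are not part of the patch:

// Editorial sketch, not patch content: how many more assignments a task needs
// after an inconclusive validation pass.
fn assignments_needed(quorum: i32, largest_group_size: i32, inconclusive_and_error: i32) -> i32 {
    // Example: quorum = 3, largest agreeing group = 2, one errored result:
    // 3 - 2 + 1 = 2 additional assignments must be handed out before the task
    // can be validated again.
    quorum - largest_group_size + inconclusive_and_error
}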
- } - set_assignment_state(&group_assignments, AssignmentState::Inconclusive) - .execute(&state.pool) - .await?; - sqlx::query_unchecked!( - r#" - UPDATE - tasks - SET - assignments_needed = assignments_needed + 1 + } + let assignment_by_id: HashMap, &Assignment> = + assignments.iter().map(|ass| (ass.id, ass)).collect(); + + let task = Task::select_one(task_id).fetch_one(&state.pool).await?; + + for (group_id, group_assignment_ids) in assignments_by_group_id { + let group_db_results: Vec = sqlx::query_as_unchecked!( + Result, + r#" + SELECT + * + FROM + results WHERE - id = $1 - + group_result_id = $1 "#, - task.id - ) - .execute(&state.pool) - .await?; - sqlx::query_unchecked!( - r#" - UPDATE - results - SET - group_assignment_id = $1 - WHERE - assignment_id = ANY($2) - "#, - group_id, - &group_assignments - ) - .execute(&state.pool) - .await?; - // Exit if inconclusive - break; - } else if let Some(canonical_result_id) = task.canonical_result_id { - // We should validate against the canonical result instead. - // Get the result for the group_id (assignment_id) - let canonical_result: Result = sqlx::query_as_unchecked!( - Result, - r#" - SELECT - * - FROM - results - WHERE - id = $1 - "#, - &group_id - ) - .fetch_one(&state.pool) - .await?; - - // Is the current group_id the canonical_result's assignnment_id? - if canonical_result.id != canonical_result_id { - // Invalid, then. + &group_id + ) + .fetch_all(&state.pool) + .await?; - set_assignment_state(&group_assignments, AssignmentState::Invalid) - .execute(&state.pool) - .await?; - } - // Validate - set_assignment_state(&group_assignments, AssignmentState::Valid) - .execute(&state.pool) - .await?; - sqlx::query_unchecked!( - r#" - UPDATE - results - SET - group_assignment_id = $1 - WHERE - assignment_id = ANY($2) - "#, - group_id, - &group_assignments - ) - .execute(&state.pool) - .await?; - } - // There are enough for quorum - // Get assignments for our task_id, regardless of group - let task_assignments: Vec = assignments - .iter() - .filter(|ass| ass.task_id == task.id) - .cloned() - .collect(); - // Get their Ids - let task_assignment_ids: Vec> = - task_assignments.iter().map(|ass| ass.id).collect(); - // Use their ids to get the results for each - let task_results: Vec = sqlx::query_as_unchecked!( + let mut group_results = sqlx::query_as_unchecked!( Result, r#" SELECT @@ -266,108 +126,99 @@ pub async fn validate_submit( WHERE assignment_id = ANY($1) "#, - &task_assignment_ids + &group_assignment_ids ) .fetch_all(&state.pool) .await?; - // Get results only for this group - let group_results: Vec = task_results + // Get result ids before extending with existing db values so we aren't setting rows that don't need to be set + let result_ids: Vec> = group_results.iter().map(|result| result.id).collect(); + group_results.extend(group_db_results); + // Earliest submitted result within the group, db or fresh validator data + let group_canonical_result = group_results .iter() - .filter(|res| group_assignments.contains(&res.assignment_id)) - .cloned() - .collect(); - // Get the earliest submitted result within that group - let earliest_group_result = group_results - .iter() - .min_by_key(|r| r.created_at) - .cloned() - .expect("this should not be hit since we're only grabbing the min"); - let mut relevant_task_results: Vec<&Result> = Vec::new(); - - // iterate over task results to find other groups with the same task - for res in task_results.iter() { - if let Some(ass) = assignment_by_id.get(&res.assignment_id) { - // 1. 
Its actual group_id is not the current_group_id - // 2. It's associated with the target task.id - if ass.id != group_id && ass.task_id == task.id { - relevant_task_results.push(res); - } - } - } + .min_by_key(|result| result.created_at) + .expect("These are all known to be real already"); + // Set validator-provided results to the same group id + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_result_id = $1 + WHERE + id = ANY($2) + "#, + group_canonical_result.id, + &result_ids + ) + .execute(&state.pool) + .await?; - let earliest_relevant_result: Option<&Result> = relevant_task_results - .iter() - .min_by_key(|r| r.created_at) - .copied(); - if let Some(earliest_result) = earliest_relevant_result { - // There is another result for another group for this task - if earliest_group_result.created_at <= earliest_result.created_at { - // This can be canonical - // Set group to valid - set_assignment_state(&group_assignments, AssignmentState::Valid) + // Check if we have quorum + if (group_results.len() as i32) >= task.quorum { + // Met quorum + // This should also catch the case that the db results + new results = quorum or higher since we combine them in an earlier step + match task.canonical_result_id { + Some(_) => {} + None => { + // Set canonical result + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + canonical_result_id = $1 + WHERE + id = $2 + "#, + group_canonical_result.id, + task.id + ) .execute(&state.pool) .await?; - // Set task's canonical result id - sqlx::query_unchecked!( - r#" - UPDATE - tasks - SET - canonical_result_id = $1 - WHERE - id = $2 - "#, - earliest_group_result.id, - task.id - ) + } + } + // Set to valid + set_assignment_state(&group_assignment_ids, AssignmentState::Valid) .execute(&state.pool) .await?; - sqlx::query_unchecked!( - r#" - UPDATE - results - SET - group_assignment_id = $1 - WHERE - assignment_id = ANY($2) - "#, - group_id, - &group_assignments - ) + // Invalidate other groups for this task + } else if let Some(canonical_result_id) = task.canonical_result_id + && group_id != canonical_result_id + { + // Invalid + set_assignment_state(&group_assignment_ids, AssignmentState::Invalid) .execute(&state.pool) .await?; - } } else { - // This is the only relevant group, set to valid - set_assignment_state(&group_assignments, AssignmentState::Valid) + // Inconclusive + set_assignment_state(&group_assignment_ids, AssignmentState::Inconclusive) .execute(&state.pool) .await?; - // Set task's canonical result id + // Find largest group for task + let mut group_id_count: HashMap, i32> = HashMap::new(); + for gr in group_results { + match gr.group_result_id { + Some(gr_id) => *group_id_count.entry(gr_id).or_insert(0) += 1, + None => {} + } + } + let largest_group_size: i32 = group_id_count + .iter() + .filter_map(|g| group_id_count.get(g).copied()) + .max() + .unwrap_or(0); + // Set assignments_needed sqlx::query_unchecked!( r#" - UPDATE + UPDATE tasks - SET - canonical_result_id = $1 - WHERE + SET + assignments_needed = $1 + WHERE id = $2 "#, - earliest_group_result.id, - task.id - ) - .execute(&state.pool) - .await?; - sqlx::query_unchecked!( - r#" - UPDATE - results - SET - group_assignment_id = $1 - WHERE - assignment_id = ANY($2) - "#, - group_id, - &group_assignments + task.quorum - largest_group_size + task.id ) .execute(&state.pool) .await?; From 29a7026ae8e68fa3af81c1ca4c6391c7bc12c9f5 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sat, 23 Aug 2025 14:13:05 -0700 Subject: [PATCH 14/29] More refactoring --- common/src/records/result.rs | 
3 +- .../src/requests/validate_submit_request.rs | 4 +- common/src/types/assignment_state.rs | 3 - common/src/types/mod.rs | 2 + common/src/types/result_state.rs | 15 +++ ...7b2e6b5a31199b622f28ca3692fe60eec53c9.json | 6 - ...b19dac70d34e283d16169617ad59af7f04ff5.json | 77 ++++++++++++ ...17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json} | 7 +- ...bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json | 27 +++- ...31e6b9e29f27da54f8ed525dc71e8f3162a0a.json | 27 +++- ...69652b09907984c43d76917f998965f2fd19c.json | 119 ++++++++++++++++++ ...ffd170787045c3233daad045204b9aa53f66c.json | 27 +++- ...c5602d215db4e5cf61484cf7b80c84b0cfe5c.json | 3 - ...faf7ba6f659832bd727aa8faa69fe761254d1.json | 3 - ...fee527847ded0826f42467a48fa81503c3d5f.json | 27 +++- server/example.env | 3 - server/migrations/20250426220809_init.sql | 33 +++-- server/src/routes/validate_submit.rs | 110 ++++++++++------ server/src/util/mod.rs | 2 + server/src/util/set_result_state.rs | 21 ++++ 20 files changed, 428 insertions(+), 91 deletions(-) create mode 100644 common/src/types/result_state.rs create mode 100644 server/.sqlx/query-0fbc6c31fbd780f76738a14f5e0b19dac70d34e283d16169617ad59af7f04ff5.json rename server/.sqlx/{query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json => query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json} (83%) create mode 100644 server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json delete mode 100644 server/example.env create mode 100644 server/src/util/set_result_state.rs diff --git a/common/src/records/result.rs b/common/src/records/result.rs index 3d69067..f37ee33 100644 --- a/common/src/records/result.rs +++ b/common/src/records/result.rs @@ -1,7 +1,7 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; -use crate::types::Id; +use crate::types::{Id, ResultState}; use super::Assignment; @@ -14,6 +14,7 @@ pub struct Result { pub stderr: String, pub exit_code: Option, pub group_result_id: Option>, + pub state: ResultState } #[non_exhaustive] diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs index a54aca8..8f6645b 100644 --- a/common/src/requests/validate_submit_request.rs +++ b/common/src/requests/validate_submit_request.rs @@ -2,10 +2,10 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::{records::{Assignment, Result}, types::Id}; +use crate::{records::{Result}, types::Id}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ValidateSubmitRequest { // First id is the assignment id that will change state, second is the "group id" it belongs with - pub assignments: HashMap, Option>>, + pub results: HashMap, Option>>, } diff --git a/common/src/types/assignment_state.rs b/common/src/types/assignment_state.rs index da3bbb5..742bddf 100644 --- a/common/src/types/assignment_state.rs +++ b/common/src/types/assignment_state.rs @@ -11,8 +11,5 @@ pub enum AssignmentState { Canceled, Expired, Submitted, - Valid, - Invalid, - Inconclusive, Error, } diff --git a/common/src/types/mod.rs b/common/src/types/mod.rs index 543eb3b..16c8c8f 100644 --- a/common/src/types/mod.rs +++ b/common/src/types/mod.rs @@ -1,7 +1,9 @@ pub mod assignment_state; pub mod id; pub mod interval; +pub mod result_state; pub use assignment_state::AssignmentState; pub use id::Id; pub use interval::Interval; +pub use result_state::ResultState; diff --git a/common/src/types/result_state.rs b/common/src/types/result_state.rs new file mode 100644 index 0000000..e35a76f --- 
/dev/null +++ b/common/src/types/result_state.rs @@ -0,0 +1,15 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "sqlx", derive(sqlx::Type))] +#[cfg_attr( + feature = "sqlx", + sqlx(type_name = "result_state", rename_all = "snake_case") +)] +pub enum ResultState { + Init, + Valid, + Invalid, + Inconclusive, + Error, +} diff --git a/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json b/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json index 9a6dc09..2ad6a3a 100644 --- a/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json +++ b/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json @@ -40,9 +40,6 @@ "canceled", "expired", "submitted", - "valid", - "invalid", - "inconclusive", "error" ] } @@ -63,9 +60,6 @@ "canceled", "expired", "submitted", - "valid", - "invalid", - "inconclusive", "error" ] } diff --git a/server/.sqlx/query-0fbc6c31fbd780f76738a14f5e0b19dac70d34e283d16169617ad59af7f04ff5.json b/server/.sqlx/query-0fbc6c31fbd780f76738a14f5e0b19dac70d34e283d16169617ad59af7f04ff5.json new file mode 100644 index 0000000..fd66392 --- /dev/null +++ b/server/.sqlx/query-0fbc6c31fbd780f76738a14f5e0b19dac70d34e283d16169617ad59af7f04ff5.json @@ -0,0 +1,77 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM\n results\n WHERE\n id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "state", + "type_info": { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "assignment_id", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "stdout", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "stderr", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "exit_code", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "group_result_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true + ] + }, + "hash": "0fbc6c31fbd780f76738a14f5e0b19dac70d34e283d16169617ad59af7f04ff5" +} diff --git a/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json b/server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json similarity index 83% rename from server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json rename to server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json index d0d8db8..3a34da8 100644 --- a/server/.sqlx/query-54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f.json +++ b/server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n assignments\n WHERE\n id = ANY($1)\n ", + "query": "\n SELECT\n *\n FROM\n assignments a\n WHERE\n id = ANY($1)\n ", "describe": { "columns": [ { @@ -40,9 +40,6 @@ "canceled", "expired", "submitted", - "valid", - "invalid", - "inconclusive", "error" ] } @@ -64,5 +61,5 @@ false ] }, - "hash": "54a247ee9e473bf6e4b0de3035c0f0e211625d90dad1d3d07dd17fcf1e817b9f" 
+ "hash": "12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1" } diff --git a/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json b/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json index 113bf17..c665f25 100644 --- a/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json +++ b/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json @@ -15,26 +15,44 @@ }, { "ordinal": 2, + "name": "state", + "type_info": { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } + }, + { + "ordinal": 3, "name": "assignment_id", "type_info": "Int8" }, { - "ordinal": 3, + "ordinal": 4, "name": "stdout", "type_info": "Text" }, { - "ordinal": 4, + "ordinal": 5, "name": "stderr", "type_info": "Text" }, { - "ordinal": 5, + "ordinal": 6, "name": "exit_code", "type_info": "Int4" }, { - "ordinal": 6, + "ordinal": 7, "name": "group_result_id", "type_info": "Int8" } @@ -50,6 +68,7 @@ false, false, false, + false, true, true ] diff --git a/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json b/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json index 515813c..3f4340e 100644 --- a/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json +++ b/server/.sqlx/query-2daeacfdb74c4d12e2e5801d58931e6b9e29f27da54f8ed525dc71e8f3162a0a.json @@ -15,26 +15,44 @@ }, { "ordinal": 2, + "name": "state", + "type_info": { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } + }, + { + "ordinal": 3, "name": "assignment_id", "type_info": "Int8" }, { - "ordinal": 3, + "ordinal": 4, "name": "stdout", "type_info": "Text" }, { - "ordinal": 4, + "ordinal": 5, "name": "stderr", "type_info": "Text" }, { - "ordinal": 5, + "ordinal": 6, "name": "exit_code", "type_info": "Int4" }, { - "ordinal": 6, + "ordinal": 7, "name": "group_result_id", "type_info": "Int8" } @@ -50,6 +68,7 @@ false, false, false, + false, true, true ] diff --git a/server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json b/server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json new file mode 100644 index 0000000..157d305 --- /dev/null +++ b/server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json @@ -0,0 +1,119 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n *\n FROM\n tasks t\n JOIN\n assignments a on t.id = a.task_id\n WHERE\n a.id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "deadline", + "type_info": "Interval" + }, + { + "ordinal": 3, + "name": "project_id", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "stdin", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "assignments_needed", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "assignment_user_ids", + "type_info": "Int8Array" + }, + { + "ordinal": 7, + "name": "canonical_result_id", + "type_info": "Int8" + }, + { + "ordinal": 8, + "name": "quorum", + "type_info": "Int4" + }, + { + "ordinal": 9, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 10, + "name": "created_at", + "type_info": 
"Timestamptz" + }, + { + "ordinal": 11, + "name": "deadline_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 12, + "name": "task_id", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "user_id", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "state", + "type_info": { + "Custom": { + "name": "assignment_state", + "kind": { + "Enum": [ + "init", + "canceled", + "expired", + "submitted", + "error" + ] + } + } + } + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c" +} diff --git a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json b/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json index 8da719c..61e1852 100644 --- a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json +++ b/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json @@ -15,26 +15,44 @@ }, { "ordinal": 2, + "name": "state", + "type_info": { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } + }, + { + "ordinal": 3, "name": "assignment_id", "type_info": "Int8" }, { - "ordinal": 3, + "ordinal": 4, "name": "stdout", "type_info": "Text" }, { - "ordinal": 4, + "ordinal": 5, "name": "stderr", "type_info": "Text" }, { - "ordinal": 5, + "ordinal": 6, "name": "exit_code", "type_info": "Int4" }, { - "ordinal": 6, + "ordinal": 7, "name": "group_result_id", "type_info": "Int8" } @@ -50,6 +68,7 @@ false, false, false, + false, true, true ] diff --git a/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json b/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json index 7ba3d62..3c00628 100644 --- a/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json +++ b/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json @@ -40,9 +40,6 @@ "canceled", "expired", "submitted", - "valid", - "invalid", - "inconclusive", "error" ] } diff --git a/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json b/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json index 3b03451..4ab1e03 100644 --- a/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json +++ b/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json @@ -14,9 +14,6 @@ "canceled", "expired", "submitted", - "valid", - "invalid", - "inconclusive", "error" ] } diff --git a/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json b/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json index 179577d..cd2e97a 100644 --- a/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json +++ b/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json @@ -15,26 +15,44 @@ }, { "ordinal": 2, + "name": "state", + "type_info": { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } + }, + { + "ordinal": 3, "name": "assignment_id", "type_info": "Int8" }, 
{ - "ordinal": 3, + "ordinal": 4, "name": "stdout", "type_info": "Text" }, { - "ordinal": 4, + "ordinal": 5, "name": "stderr", "type_info": "Text" }, { - "ordinal": 5, + "ordinal": 6, "name": "exit_code", "type_info": "Int4" }, { - "ordinal": 6, + "ordinal": 7, "name": "group_result_id", "type_info": "Int8" } @@ -50,6 +68,7 @@ false, false, false, + false, true, true ] diff --git a/server/example.env b/server/example.env deleted file mode 100644 index 6fe42cd..0000000 --- a/server/example.env +++ /dev/null @@ -1,3 +0,0 @@ -DATABASE_URL=postgres://postgres@localhost/clusterizer -CLUSTERIZER_SECRET=balls -CLUSTERIZER_ADDRESS=0.0.0.0:3000 diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index 309f7b1..abc8af6 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -31,6 +31,23 @@ CREATE TABLE project_versions ( archive_url text NOT NULL ); + + +CREATE TYPE assignment_state AS ENUM ( + 'init', + 'canceled', + 'expired', + 'submitted', + 'error' +); + +CREATE TYPE result_state AS ENUM ( + 'init', + 'valid', + 'invalid', + 'inconclusive', + 'error' +); CREATE TABLE tasks ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), @@ -42,18 +59,6 @@ CREATE TABLE tasks ( canonical_result_id int8, quorum int4 NOT NULL ); - -CREATE TYPE assignment_state AS ENUM ( - 'init', - 'canceled', - 'expired', - 'submitted', - 'valid', - 'invalid', - 'inconclusive', - 'error' -); - CREATE TABLE assignments ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), @@ -70,13 +75,15 @@ CREATE UNIQUE INDEX assignments_task_id_user_id_key CREATE TABLE results ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), + state result_state NOT NULL DEFAULT 'init', assignment_id int8 NOT NULL UNIQUE REFERENCES assignments(id) ON DELETE RESTRICT ON UPDATE RESTRICT, stdout text NOT NULL, stderr text NOT NULL, exit_code int4, group_result_id int8 REFERENCES results(id) ON DELETE RESTRICT ON UPDATE RESTRICT ); - +ALTER TABLE tasks +ADD FOREIGN KEY (canonical_result_id) REFERENCES results(id) ON DELETE RESTRICT ON UPDATE RESTRICT; CREATE FUNCTION trigger_function_tasks_remove_assignment_user_id() RETURNS TRIGGER AS $$ BEGIN diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 2e71f52..1a0dad3 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -3,7 +3,7 @@ use clusterizer_common::{ errors::ValidateSubmitError, records::{Assignment, Result, Task}, requests::ValidateSubmitRequest, - types::{AssignmentState, Id}, + types::{Id, ResultState}, }; use std::collections::{HashMap, HashSet}; @@ -11,7 +11,7 @@ use std::collections::{HashMap, HashSet}; use crate::{ result::{AppError, AppResult}, state::AppState, - util::{Select, set_assignment_state}, + util::set_result_state, }; pub async fn validate_submit( @@ -34,40 +34,73 @@ pub async fn validate_submit( */ let mut group_ids: HashSet> = HashSet::new(); - let mut assignment_ids: Vec> = request.assignments.keys().cloned().collect(); - let mut group_id_by_assignment: HashMap, Id> = HashMap::new(); - let mut assignments_by_group_id: HashMap, Vec>> = HashMap::new(); + let result_ids: Vec> = request.results.keys().cloned().collect(); + let mut group_id_by_result: HashMap, Id> = HashMap::new(); + let mut results_by_group_id: HashMap, Vec>> = HashMap::new(); - 
let assignments = sqlx::query_as_unchecked!( - Assignment, + let results = sqlx::query_as_unchecked!( + Result, r#" SELECT * FROM - assignments + results WHERE id = ANY($1) "#, - &assignment_ids + &result_ids ) .fetch_all(&state.pool) .await?; // Ensure all assignments are real - if assignment_ids.len() != assignments.len() { + if result_ids.len() != results.len() { Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))? } // Ensure all assignments are for the same task - let task_id = assignments[0].task_id; - if assignments.iter().any(|ass| ass.task_id != task_id) { + let task = sqlx::query_as_unchecked!( + Task, + r#" + SELECT + t.* + FROM + tasks t + JOIN + assignments a on t.id = a.task_id + WHERE + a.id = $1 + "#, + &results[0].assignment_id + ) + .fetch_one(&state.pool) + .await?; + let assignment_ids: Vec> = + results.iter().map(|result| result.assignment_id).collect(); + + let assignments: Vec = sqlx::query_as_unchecked!( + Assignment, + r#" + SELECT + * + FROM + assignments a + WHERE + id = ANY($1) + "#, + &assignment_ids + ) + .fetch_all(&state.pool) // or fetch_one, depending on your use + .await?; + + if assignments.iter().any(|ass| ass.task_id != task.id) { Err(AppError::Specific( ValidateSubmitError::TooManyTasksValidationError, ))? } // Disallow state transitions via validation unless the assignment is in the Submitted state - if assignments + if results .iter() - .any(|assignment| assignment.state != AssignmentState::Submitted) + .any(|result| result.state != ResultState::Init) { Err(AppError::Specific( ValidateSubmitError::StateTransitionForbidden, @@ -75,32 +108,28 @@ pub async fn validate_submit( } // Set assignments to Error if they do not have a group_id (aka result_id) - for (ass, group_id) in request.assignments { + for (result_id, group_id) in request.results { match group_id { Some(g) => { // Add group id for that assignment to group_ids // Add assignment id to assignment_ids // Add assignment id and group id to new HashMap which filters out errored results - assignments_by_group_id + results_by_group_id .entry(g) .or_insert_with(Vec::new) - .push(ass); + .push(result_id); group_ids.insert(g); - group_id_by_assignment.insert(ass, g); + group_id_by_result.insert(result_id, g); } None => { - set_assignment_state(&[ass], AssignmentState::Error) + set_result_state(&[result_id], ResultState::Error) .execute(&state.pool) .await?; } } } - let assignment_by_id: HashMap, &Assignment> = - assignments.iter().map(|ass| (ass.id, ass)).collect(); - - let task = Task::select_one(task_id).fetch_one(&state.pool).await?; - for (group_id, group_assignment_ids) in assignments_by_group_id { + for (group_id, group_result_ids) in results_by_group_id { let group_db_results: Vec = sqlx::query_as_unchecked!( Result, r#" @@ -126,13 +155,14 @@ pub async fn validate_submit( WHERE assignment_id = ANY($1) "#, - &group_assignment_ids + &group_result_ids ) .fetch_all(&state.pool) .await?; - // Get result ids before extending with existing db values so we aren't setting rows that don't need to be set - let result_ids: Vec> = group_results.iter().map(|result| result.id).collect(); group_results.extend(group_db_results); + let group_result_ids: Vec> = + group_results.iter().map(|result| result.id).collect(); + // Earliest submitted result within the group, db or fresh validator data let group_canonical_result = group_results .iter() @@ -179,7 +209,7 @@ pub async fn validate_submit( } } // Set to valid - set_assignment_state(&group_assignment_ids, AssignmentState::Valid) + 
set_result_state(&group_result_ids, ResultState::Valid) .execute(&state.pool) .await?; // Invalidate other groups for this task @@ -187,38 +217,46 @@ pub async fn validate_submit( && group_id != canonical_result_id { // Invalid - set_assignment_state(&group_assignment_ids, AssignmentState::Invalid) + set_result_state(&group_result_ids, ResultState::Invalid) .execute(&state.pool) .await?; } else { // Inconclusive - set_assignment_state(&group_assignment_ids, AssignmentState::Inconclusive) + set_result_state(&group_result_ids, ResultState::Inconclusive) .execute(&state.pool) .await?; // Find largest group for task let mut group_id_count: HashMap, i32> = HashMap::new(); - for gr in group_results { + for gr in &group_results { match gr.group_result_id { Some(gr_id) => *group_id_count.entry(gr_id).or_insert(0) += 1, None => {} } } + let inconclusive_and_error_size: i32 = group_results + .iter() + .filter(|result| { + result.state == ResultState::Inconclusive || result.state == ResultState::Error + }) + .count() as i32; + let largest_group_size: i32 = group_id_count .iter() - .filter_map(|g| group_id_count.get(g).copied()) + .map(|(_, count)| *count) .max() .unwrap_or(0); // Set assignments_needed sqlx::query_unchecked!( r#" - UPDATE + UPDATE tasks - SET + SET assignments_needed = $1 - WHERE + WHERE id = $2 "#, - task.quorum - largest_group_size + task.id + task.quorum - largest_group_size + inconclusive_and_error_size, + task.id ) .execute(&state.pool) .await?; diff --git a/server/src/util/mod.rs b/server/src/util/mod.rs index b1c2080..4c2d0bb 100644 --- a/server/src/util/mod.rs +++ b/server/src/util/mod.rs @@ -6,10 +6,12 @@ use sqlx::{ pub mod assignment_deadline; pub mod select; pub mod set_assignment_state; +pub mod set_result_state; pub use assignment_deadline::update_expired_assignments; pub use select::Select; pub use set_assignment_state::set_assignment_state; +pub use set_result_state::set_result_state; type Query = sqlx::query::Query<'static, Postgres, PgArguments>; type QueryScalar = sqlx::query::QueryScalar<'static, Postgres, T, PgArguments>; diff --git a/server/src/util/set_result_state.rs b/server/src/util/set_result_state.rs new file mode 100644 index 0000000..9e90d83 --- /dev/null +++ b/server/src/util/set_result_state.rs @@ -0,0 +1,21 @@ +use clusterizer_common::{ + records::Result, + types::{Id, ResultState}, +}; + +use super::Query; + +pub fn set_result_state(result_ids: &[Id], result_state: ResultState) -> Query { + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + state = $1 + WHERE + id = ANY($2) + "#, + result_state, + result_ids + ) +} From 22dafe9eb444e4fea95b92f89529de9eaee8a32c Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sat, 23 Aug 2025 14:15:03 -0700 Subject: [PATCH 15/29] clippy and sqlx prepare --- ...9fb9160b3c33f2fe845feeaa6ed3a006332cc.json | 70 +++++++++++ ...69652b09907984c43d76917f998965f2fd19c.json | 119 ------------------ ...462cd94a548cd83fbc39fb826ced7c0bce9f1.json | 28 +++++ ...3d931776fdddf26a5584efd6d085d96df034b.json | 15 +++ server/src/routes/validate_submit.rs | 11 +- 5 files changed, 116 insertions(+), 127 deletions(-) create mode 100644 server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json delete mode 100644 server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json create mode 100644 server/.sqlx/query-bf6f3f4b26b32f1f446cf0fd67c462cd94a548cd83fbc39fb826ced7c0bce9f1.json create mode 100644 
server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json diff --git a/server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json b/server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json new file mode 100644 index 0000000..a29d2b4 --- /dev/null +++ b/server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN\n assignments a on t.id = a.task_id\n WHERE\n a.id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 2, + "name": "deadline", + "type_info": "Interval" + }, + { + "ordinal": 3, + "name": "project_id", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "stdin", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "assignments_needed", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "assignment_user_ids", + "type_info": "Int8Array" + }, + { + "ordinal": 7, + "name": "canonical_result_id", + "type_info": "Int8" + }, + { + "ordinal": 8, + "name": "quorum", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + false + ] + }, + "hash": "179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc" +} diff --git a/server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json b/server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json deleted file mode 100644 index 157d305..0000000 --- a/server/.sqlx/query-8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n tasks t\n JOIN\n assignments a on t.id = a.task_id\n WHERE\n a.id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "created_at", - "type_info": "Timestamptz" - }, - { - "ordinal": 2, - "name": "deadline", - "type_info": "Interval" - }, - { - "ordinal": 3, - "name": "project_id", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "stdin", - "type_info": "Text" - }, - { - "ordinal": 5, - "name": "assignments_needed", - "type_info": "Int4" - }, - { - "ordinal": 6, - "name": "assignment_user_ids", - "type_info": "Int8Array" - }, - { - "ordinal": 7, - "name": "canonical_result_id", - "type_info": "Int8" - }, - { - "ordinal": 8, - "name": "quorum", - "type_info": "Int4" - }, - { - "ordinal": 9, - "name": "id", - "type_info": "Int8" - }, - { - "ordinal": 10, - "name": "created_at", - "type_info": "Timestamptz" - }, - { - "ordinal": 11, - "name": "deadline_at", - "type_info": "Timestamptz" - }, - { - "ordinal": 12, - "name": "task_id", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "user_id", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "state", - "type_info": { - "Custom": { - "name": "assignment_state", - "kind": { - "Enum": [ - "init", - "canceled", - "expired", - "submitted", - "error" - ] - } - } - } - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false - ] - }, - "hash": 
"8f124cd8df4921e78a1922f739269652b09907984c43d76917f998965f2fd19c" -} diff --git a/server/.sqlx/query-bf6f3f4b26b32f1f446cf0fd67c462cd94a548cd83fbc39fb826ced7c0bce9f1.json b/server/.sqlx/query-bf6f3f4b26b32f1f446cf0fd67c462cd94a548cd83fbc39fb826ced7c0bce9f1.json new file mode 100644 index 0000000..f98d975 --- /dev/null +++ b/server/.sqlx/query-bf6f3f4b26b32f1f446cf0fd67c462cd94a548cd83fbc39fb826ced7c0bce9f1.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n results\n SET \n state = $1\n WHERE\n id = ANY($2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + }, + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "bf6f3f4b26b32f1f446cf0fd67c462cd94a548cd83fbc39fb826ced7c0bce9f1" +} diff --git a/server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json b/server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json new file mode 100644 index 0000000..c2c777a --- /dev/null +++ b/server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n tasks\n SET\n assignments_needed = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b" +} diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 1a0dad3..4d25bd0 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -116,7 +116,7 @@ pub async fn validate_submit( // Add assignment id and group id to new HashMap which filters out errored results results_by_group_id .entry(g) - .or_insert_with(Vec::new) + .or_default() .push(result_id); group_ids.insert(g); group_id_by_result.insert(result_id, g); @@ -228,10 +228,7 @@ pub async fn validate_submit( // Find largest group for task let mut group_id_count: HashMap, i32> = HashMap::new(); for gr in &group_results { - match gr.group_result_id { - Some(gr_id) => *group_id_count.entry(gr_id).or_insert(0) += 1, - None => {} - } + if let Some(gr_id) = gr.group_result_id { *group_id_count.entry(gr_id).or_insert(0) += 1 } } let inconclusive_and_error_size: i32 = group_results .iter() @@ -240,9 +237,7 @@ pub async fn validate_submit( }) .count() as i32; - let largest_group_size: i32 = group_id_count - .iter() - .map(|(_, count)| *count) + let largest_group_size: i32 = group_id_count.values().copied() .max() .unwrap_or(0); // Set assignments_needed From 58e42b2b5ecf5266fd9d4b3430fa2d03328f6e88 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sat, 23 Aug 2025 14:17:12 -0700 Subject: [PATCH 16/29] Re-add TraceLayer thing --- server/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main.rs b/server/src/main.rs index 7cba180..1aa9526 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -60,6 +60,7 @@ async fn serve_task(state: AppState, address: String) { .route("/submit_result/{id}", post(routes::submit_result)) .route("/validate_fetch/{id}", get(routes::validate_fetch)) .route("/validate_submit", post(routes::validate_submit)) + .layer(TraceLayer::new_for_http()) .with_state(state); let listener = TcpListener::bind(address).await.unwrap(); From 6f7db7ec62243b3f43e9e13e17ec4878512488d7 Mon Sep 17 00:00:00 2001 From: 
BoySanic Date: Sat, 23 Aug 2025 14:19:56 -0700 Subject: [PATCH 17/29] Cargo clippy and sqlx prepare --- ...c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json | 6 ------ ...86bf86af3351a6629a942b732ccf6740b2e683eedae.json | 4 +--- ...b22dddc4842facf875aa48e0b63003bd4f8e043597b.json | 6 ------ server/src/routes/validate_submit.rs | 13 +++++-------- 4 files changed, 6 insertions(+), 23 deletions(-) diff --git a/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json b/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json index 16e3dd1..1dccf66 100644 --- a/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json +++ b/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json @@ -17,18 +17,12 @@ "ordinal": 2, "name": "name", "type_info": "Text" - }, - { - "ordinal": 3, - "name": "tester_archive_url", - "type_info": "Text" } ], "parameters": { "Left": [] }, "nullable": [ - false, false, false, false diff --git a/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json b/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json index 4e370e3..f64dee6 100644 --- a/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json +++ b/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json @@ -40,9 +40,7 @@ "canceled", "expired", "submitted", - "valid", - "invalid", - "inconclusive" + "error" ] } } diff --git a/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json b/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json index 5bd55d1..ca6ed71 100644 --- a/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json +++ b/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json @@ -17,11 +17,6 @@ "ordinal": 2, "name": "name", "type_info": "Text" - }, - { - "ordinal": 3, - "name": "tester_archive_url", - "type_info": "Text" } ], "parameters": { @@ -30,7 +25,6 @@ ] }, "nullable": [ - false, false, false, false diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 4d25bd0..b6f2f98 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -114,10 +114,7 @@ pub async fn validate_submit( // Add group id for that assignment to group_ids // Add assignment id to assignment_ids // Add assignment id and group id to new HashMap which filters out errored results - results_by_group_id - .entry(g) - .or_default() - .push(result_id); + results_by_group_id.entry(g).or_default().push(result_id); group_ids.insert(g); group_id_by_result.insert(result_id, g); } @@ -228,7 +225,9 @@ pub async fn validate_submit( // Find largest group for task let mut group_id_count: HashMap, i32> = HashMap::new(); for gr in &group_results { - if let Some(gr_id) = gr.group_result_id { *group_id_count.entry(gr_id).or_insert(0) += 1 } + if let Some(gr_id) = gr.group_result_id { + *group_id_count.entry(gr_id).or_insert(0) += 1 + } } let inconclusive_and_error_size: i32 = group_results .iter() @@ -237,9 +236,7 @@ pub async fn validate_submit( }) .count() as i32; - let largest_group_size: i32 = group_id_count.values().copied() - .max() - .unwrap_or(0); + let largest_group_size: i32 = group_id_count.values().copied().max().unwrap_or(0); // Set assignments_needed sqlx::query_unchecked!( r#" From 
c686f8dfb8dde25e6a28b4f628a60bc20a604dd5 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sat, 23 Aug 2025 14:22:15 -0700 Subject: [PATCH 18/29] Cargo fmt --- common/src/records/result.rs | 2 +- common/src/requests/validate_submit_request.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/src/records/result.rs b/common/src/records/result.rs index f37ee33..3d497dc 100644 --- a/common/src/records/result.rs +++ b/common/src/records/result.rs @@ -14,7 +14,7 @@ pub struct Result { pub stderr: String, pub exit_code: Option, pub group_result_id: Option>, - pub state: ResultState + pub state: ResultState, } #[non_exhaustive] diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs index 8f6645b..0603a3e 100644 --- a/common/src/requests/validate_submit_request.rs +++ b/common/src/requests/validate_submit_request.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::{records::{Result}, types::Id}; +use crate::{records::Result, types::Id}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ValidateSubmitRequest { From 093abd98576d8bf7ad78405e3d6d39d759fb858b Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sat, 23 Aug 2025 14:24:31 -0700 Subject: [PATCH 19/29] sqlx prepare --- ...724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json | 6 ++++++ ...ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json b/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json index 1dccf66..16e3dd1 100644 --- a/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json +++ b/server/.sqlx/query-110699ecb02de724e7b96c93a01edb9ea3cd06c2c1189c29703b35f4894d7521.json @@ -17,12 +17,18 @@ "ordinal": 2, "name": "name", "type_info": "Text" + }, + { + "ordinal": 3, + "name": "tester_archive_url", + "type_info": "Text" } ], "parameters": { "Left": [] }, "nullable": [ + false, false, false, false diff --git a/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json b/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json index ca6ed71..5bd55d1 100644 --- a/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json +++ b/server/.sqlx/query-d4273a3142cd2ace08d03b22dddc4842facf875aa48e0b63003bd4f8e043597b.json @@ -17,6 +17,11 @@ "ordinal": 2, "name": "name", "type_info": "Text" + }, + { + "ordinal": 3, + "name": "tester_archive_url", + "type_info": "Text" } ], "parameters": { @@ -25,6 +30,7 @@ ] }, "nullable": [ + false, false, false, false From 5173c89d6e6c14ead92b582948c5dc2062cb1fcb Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sun, 30 Nov 2025 21:54:15 -0800 Subject: [PATCH 20/29] finally some progress --- common/src/errors/validate_submit_err.rs | 24 +- ...413c5de92cbebeca249d519a43074862bc98e.json | 14 + ...c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json | 65 --- ...bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json | 77 ---- ...105d29e17356e756167e11b129d2b93bcf09.json} | 6 +- ...4c8f9c35818097734bd5ef90fe507cad77786.json | 15 + ...1c7c7b237943843b376a581d725c5accaeaf9.json | 15 - ...75a630db700a2adf7a8a61426105f5128f491.json | 15 + ...7b1cac2040f70a4973f13f892fcc7592cbf83.json | 15 - ...3d931776fdddf26a5584efd6d085d96df034b.json | 15 - ...6ba929e36d7c2de7931437acc6779cfae528.json} | 6 +- 
...0dbd31ba71d4b7d938f3435ac964fffa9c17f.json | 15 + server/src/routes/validate_submit.rs | 391 ++++++++++-------- 13 files changed, 302 insertions(+), 371 deletions(-) create mode 100644 server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json delete mode 100644 server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json delete mode 100644 server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json rename server/.sqlx/{query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json => query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json} (82%) create mode 100644 server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json delete mode 100644 server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json create mode 100644 server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json delete mode 100644 server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json delete mode 100644 server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json rename server/.sqlx/{query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json => query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json} (75%) create mode 100644 server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index a22b12b..42b8459 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -3,16 +3,20 @@ use thiserror::Error; #[derive(Clone, Hash, Debug, Serialize, Deserialize, Error)] pub enum ValidateSubmitError { - #[error("invalid assignment")] - InvalidAssignment, - #[error("task already validated and this result is not valid")] - InconsistentValidationState, - #[error("all results are inconclusive, and no new assignment has finished to solve it")] - ValidationImpossibleError, - #[error("validation group contained assignments belonging to multiple tasks")] - TooManyTasksValidationError, - #[error("assignments referred to by group id cannot refer to an assignment other than itself")] - ValidationGroupAssociationInconsistency, + #[error("invalid result given")] + InvalidResult, + #[error("validation group contained results belonging to multiple tasks")] + InvalidTaskCount, + #[error("results referred to by group id cannot refer to an result other than itself")] + InvalidGroupReference, #[error("state transition forbidden")] StateTransitionForbidden, + #[error( + "cannot attempt validation without all results relevant to choosing the canonical result" + )] + MissingResults, + #[error( + "validator must choose the earliest group_result_id by created_at date to use for the group" + )] + NondeterministicGroup, } diff --git a/server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json b/server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json new file mode 100644 index 0000000..df083c3 --- /dev/null +++ b/server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n state = 'inconclusive'\n WHERE\n id = ANY($1)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + 
"nullable": [] + }, + "hash": "0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e" +} diff --git a/server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json b/server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json deleted file mode 100644 index 3a34da8..0000000 --- a/server/.sqlx/query-12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n assignments a\n WHERE\n id = ANY($1)\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "created_at", - "type_info": "Timestamptz" - }, - { - "ordinal": 2, - "name": "deadline_at", - "type_info": "Timestamptz" - }, - { - "ordinal": 3, - "name": "task_id", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "user_id", - "type_info": "Int8" - }, - { - "ordinal": 5, - "name": "state", - "type_info": { - "Custom": { - "name": "assignment_state", - "kind": { - "Enum": [ - "init", - "canceled", - "expired", - "submitted", - "error" - ] - } - } - } - } - ], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false - ] - }, - "hash": "12856967f604c24930149e34844c17c1c2cfc8193fb0b6aa9a5d17baf9e5c6d1" -} diff --git a/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json b/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json deleted file mode 100644 index c665f25..0000000 --- a/server/.sqlx/query-13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n results\n WHERE\n group_result_id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "created_at", - "type_info": "Timestamptz" - }, - { - "ordinal": 2, - "name": "state", - "type_info": { - "Custom": { - "name": "result_state", - "kind": { - "Enum": [ - "init", - "valid", - "invalid", - "inconclusive", - "error" - ] - } - } - } - }, - { - "ordinal": 3, - "name": "assignment_id", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "stdout", - "type_info": "Text" - }, - { - "ordinal": 5, - "name": "stderr", - "type_info": "Text" - }, - { - "ordinal": 6, - "name": "exit_code", - "type_info": "Int4" - }, - { - "ordinal": 7, - "name": "group_result_id", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true - ] - }, - "hash": "13b2878a958300c53e042824690bb0f4e8bba048af02db76a8e3ed403a2e0c5f" -} diff --git a/server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json b/server/.sqlx/query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json similarity index 82% rename from server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json rename to server/.sqlx/query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json index a29d2b4..31600a3 100644 --- a/server/.sqlx/query-179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc.json +++ b/server/.sqlx/query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN\n 
assignments a on t.id = a.task_id\n WHERE\n a.id = $1\n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN\n assignments a\n ON\n a.task_id = t.id\n WHERE\n a.id = ANY($1)\n ", "describe": { "columns": [ { @@ -51,7 +51,7 @@ ], "parameters": { "Left": [ - "Int8" + "Int8Array" ] }, "nullable": [ @@ -66,5 +66,5 @@ false ] }, - "hash": "179a66ba470325232cccfbff5eb9fb9160b3c33f2fe845feeaa6ed3a006332cc" + "hash": "167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09" } diff --git a/server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json b/server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json new file mode 100644 index 0000000..d8cdaf8 --- /dev/null +++ b/server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n group_result_id = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786" +} diff --git a/server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json b/server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json deleted file mode 100644 index 7ae9fac..0000000 --- a/server/.sqlx/query-428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n results\n SET \n group_result_id = $1\n WHERE \n id = ANY($2)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "428b183a56111200d7a00176e861c7c7b237943843b376a581d725c5accaeaf9" -} diff --git a/server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json b/server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json new file mode 100644 index 0000000..8d97e1b --- /dev/null +++ b/server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n tasks\n SET\n assignments_needed = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491" +} diff --git a/server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json b/server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json deleted file mode 100644 index 00d297e..0000000 --- a/server/.sqlx/query-84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE \n tasks\n SET \n canonical_result_id = $1\n WHERE \n id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "84d0361eddf39635747ad29e4cc7b1cac2040f70a4973f13f892fcc7592cbf83" -} diff --git a/server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json b/server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json deleted file mode 100644 index c2c777a..0000000 --- a/server/.sqlx/query-d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b.json +++ /dev/null @@ -1,15 +0,0 
@@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n tasks\n SET\n assignments_needed = $1\n WHERE\n id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "d46b213d56f4d6f72c0821cdae23d931776fdddf26a5584efd6d085d96df034b" -} diff --git a/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json b/server/.sqlx/query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json similarity index 75% rename from server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json rename to server/.sqlx/query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json index cd2e97a..c9b997d 100644 --- a/server/.sqlx/query-e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f.json +++ b/server/.sqlx/query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n results\n WHERE\n assignment_id = ANY($1)\n ", + "query": "\n SELECT\n r.*\n FROM\n results r\n JOIN\n assignments a\n ON\n a.id = r.assignment_id\n WHERE\n a.task_id = $1\n AND\n r.created_at < $2\n AND\n r.id != ALL($3)\n ", "describe": { "columns": [ { @@ -59,6 +59,8 @@ ], "parameters": { "Left": [ + "Int8", + "Timestamptz", "Int8Array" ] }, @@ -73,5 +75,5 @@ true ] }, - "hash": "e9abbf6cde84157e48c9c7f3307fee527847ded0826f42467a48fa81503c3d5f" + "hash": "eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528" } diff --git a/server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json b/server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json new file mode 100644 index 0000000..6a22411 --- /dev/null +++ b/server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n state = CASE\n WHEN group_result_id = $1 THEN 'valid'::result_state\n ELSE 'invalid'::result_state\n END\n WHERE\n id = ANY($2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f" +} diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index b6f2f98..503ccc1 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -5,6 +5,7 @@ use clusterizer_common::{ requests::ValidateSubmitRequest, types::{Id, ResultState}, }; +use sqlx::types::chrono::{DateTime, Utc}; use std::collections::{HashMap, HashSet}; @@ -19,26 +20,68 @@ pub async fn validate_submit( Json(request): Json, ) -> AppResult<(), ValidateSubmitError> { /* - check that there exists no results in the db submitted before the latest given assignment id, this is a subtle issue i just thought of that i think we have not discussed before. - find the valid group, if there is one, the valid group is the group that meets quorum with the earliest submitted result id - if there is no valid group: - 8.1. set all assignments that are in any group to 'inconclusive' - 8.2. set assignments_needed to the number of 'inconclusive' and 'error' results plus quorum minus the size of the largest group (i think this formula is correct but unsure, we discussed it before as well but don't wanna search for the message rn) - if there is a valid group: - 9.1. 
set all assignments in that group to 'valid' and all assignments in other groups to 'invalid' - 9.2. set the canonical result id to the earliest result in the group - import to note that when talking about groups, we should always also consider groups that were already in the db - not just the groups that the validator just submitted + given_results = fetch all results of given result_ids + if given_results.len() != result_ids.len(): + error + if the state of any of given_results is not 'init': + error + + tasks = fetch all tasks of given_results + + if tasks.len() != 1: + error + + last_date = max(given_results submitted date) + + previously_given_results = fetch all results submitted before last_date and not in given_results + + if the state of any of previously_given_results is 'init': + error + + if the group id of the result with the same id as any given group id is not equal to that same group id: + error + + error_results = [] + + for result, group_id in given_results: + if group_id is not None: + update group id of result + result.group_id = group_id + else: + error_results.append(result) + result.state = 'error' + + update state of error_results + + all_results = given_results + previously_given_results + + results_by_group_id = you know how to make this + + valid_group_id = group_id of the group with at least quorum results that has the earliest submitted result + + if valid_group_id is None: + set result state of all results in results_by_group_id to 'inconclusive' + update assignments_needed + else: + UPDATE + results + SET + state = (group_id = $1 ? 'valid' : 'invalid') + WHERE + id = ANY($2) + + # for group_id, group_result_ids in results_by_group_id: + # if group_id == valid_group_id: + # set result state to 'valid' + # else: + # set result state to 'invalid' */ - let mut group_ids: HashSet> = HashSet::new(); - let result_ids: Vec> = request.results.keys().cloned().collect(); - let mut group_id_by_result: HashMap, Id> = HashMap::new(); - let mut results_by_group_id: HashMap, Vec>> = HashMap::new(); + let result_ids: Vec<_> = request.results.keys().cloned().collect(); - let results = sqlx::query_as_unchecked!( + let given_results: Vec = sqlx::query_as_unchecked!( Result, r#" SELECT @@ -48,17 +91,25 @@ pub async fn validate_submit( WHERE id = ANY($1) "#, - &result_ids + result_ids ) .fetch_all(&state.pool) .await?; - // Ensure all assignments are real - if result_ids.len() != results.len() { - Err(AppError::Specific(ValidateSubmitError::InvalidAssignment))? + + if given_results.len() != request.results.len() { - Err(AppError::Specific(ValidateSubmitError::InvalidResult))? } - // Ensure all assignments are for the same task - let task = sqlx::query_as_unchecked!( + if given_results + .iter() + .any(|result| result.state != ResultState::Init) + { + Err(AppError::Specific( + ValidateSubmitError::StateTransitionForbidden, + ))?
+ } + + let given_tasks: Vec = sqlx::query_as_unchecked!( Task, r#" SELECT @@ -66,59 +117,110 @@ pub async fn validate_submit( FROM tasks t JOIN - assignments a on t.id = a.task_id + assignments a + ON + a.task_id = t.id WHERE - a.id = $1 + a.id = ANY($1) "#, - &results[0].assignment_id + given_results.iter().map(|result| result.assignment_id).collect::>() ) - .fetch_one(&state.pool) + .fetch_all(&state.pool) .await?; - let assignment_ids: Vec> = - results.iter().map(|result| result.assignment_id).collect(); - let assignments: Vec = sqlx::query_as_unchecked!( - Assignment, + if given_tasks.len() != 1 { + Err(AppError::Specific(ValidateSubmitError::InvalidTaskCount))? + } + + let task = &given_tasks[0]; + let last_given_date = given_results + .clone() + .iter() + .map(|result| result.created_at) + .max() + .expect("There will be a created_at because of the db schema"); + + let previously_given_results: Vec = sqlx::query_as_unchecked!( + Result, r#" SELECT - * + r.* FROM + results r + JOIN assignments a + ON + a.id = r.assignment_id WHERE - id = ANY($1) + a.task_id = $1 + AND + r.created_at < $2 + AND + r.id != ALL($3) "#, - &assignment_ids - ) - .fetch_all(&state.pool) // or fetch_one, depending on your use + task.id, + last_given_date, + given_results.clone().into_iter().map(|result| result.id).collect::>() + ).fetch_all(&state.pool) .await?; - if assignments.iter().any(|ass| ass.task_id != task.id) { - Err(AppError::Specific( - ValidateSubmitError::TooManyTasksValidationError, - ))? - } - // Disallow state transitions via validation unless the assignment is in the Submitted state - if results - .iter() - .any(|result| result.state != ResultState::Init) + // if the state of any of previously_given_results is 'init': + if previously_given_results + .iter() + .any(|result| result.state == ResultState::Init) { Err(AppError::Specific( ValidateSubmitError::StateTransitionForbidden, ))? } + let mut all_given_results = previously_given_results.clone(); + all_given_results.extend(given_results.clone()); + + let mut results_by_group_id: HashMap, Vec>> = HashMap::new(); + // Build results_by_group_id + for (result_id, group_id) in &request.results { + if let Some(gid) = group_id { + results_by_group_id + .entry(*gid) + .or_insert_with(Vec::new) + .push(*result_id); + } + } + + for (group_id, results) in &results_by_group_id { + if group_id != results.iter().map(|result| result).min().expect("group cannot be empty") { + Err(AppError::Specific(ValidateSubmitError::NondeterministicGroup))? 
+ } + } + + let mut results_by_result_id: HashMap, Result> = HashMap::new(); + for (result) in all_given_results.clone() { + results_by_result_id.insert(result.id, result); + } - // Set assignments to Error if they do not have a group_id (aka result_id) for (result_id, group_id) in request.results { match group_id { - Some(g) => { - // Add group id for that assignment to group_ids - // Add assignment id to assignment_ids - // Add assignment id and group id to new HashMap which filters out errored results - results_by_group_id.entry(g).or_default().push(result_id); - group_ids.insert(g); - group_id_by_result.insert(result_id, g); + Some(gid) => { + // Validation successful, update group_result_id both locally and in db + results_by_result_id.get_mut(&result_id).unwrap().group_result_id = Some(gid.clone()); + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_result_id = $1 + WHERE + id = $2 + "#, + Some(gid.clone()), + result_id + ) + .execute(&state.pool) + .await?; } None => { + // Validation unsuccessful, update state to error + results_by_result_id.get_mut(&result_id).unwrap().state = ResultState::Error; set_result_state(&[result_id], ResultState::Error) .execute(&state.pool) .await?; @@ -126,133 +228,84 @@ pub async fn validate_submit( } } - for (group_id, group_result_ids) in results_by_group_id { - let group_db_results: Vec = sqlx::query_as_unchecked!( - Result, - r#" - SELECT - * - FROM - results - WHERE - group_result_id = $1 - "#, - &group_id - ) - .fetch_all(&state.pool) - .await?; - let mut group_results = sqlx::query_as_unchecked!( - Result, - r#" - SELECT - * - FROM - results - WHERE - assignment_id = ANY($1) - "#, - &group_result_ids - ) - .fetch_all(&state.pool) - .await?; - group_results.extend(group_db_results); - let group_result_ids: Vec> = - group_results.iter().map(|result| result.id).collect(); - - // Earliest submitted result within the group, db or fresh validator data - let group_canonical_result = group_results - .iter() - .min_by_key(|result| result.created_at) - .expect("These are all known to be real already"); - // Set validator-provided results to the same group id + let valid_group_id = results_by_group_id + .iter() + .filter(|(_, results)| results.len() as i32 >= task.quorum) + .map(|(&group_id, results)| { + let earliest = results + .iter() + .map(|r| results_by_result_id[r].created_at) + .min() + .expect("groups are never empty"); + (group_id, earliest) + }) + .min_by_key(|&(_, earliest)| earliest) + .map(|(group_id, _)| group_id); + + if valid_group_id == None { sqlx::query_unchecked!( - r#" - UPDATE - results - SET - group_result_id = $1 - WHERE - id = ANY($2) - "#, - group_canonical_result.id, - &result_ids + r#" + UPDATE + results + SET + state = 'inconclusive' + WHERE + id = ANY($1) + "#, + all_given_results.clone().into_iter().map(|result| result.id).collect::>() ) .execute(&state.pool) .await?; - - // Check if we have quorum - if (group_results.len() as i32) >= task.quorum { - // Met quorum - // This should also catch the case that the db results + new results = quorum or higher since we combine them in an earlier step - match task.canonical_result_id { - Some(_) => {} - None => { - // Set canonical result - sqlx::query_unchecked!( - r#" - UPDATE - tasks - SET - canonical_result_id = $1 - WHERE - id = $2 - "#, - group_canonical_result.id, - task.id - ) - .execute(&state.pool) - .await?; - } + + let mut group_counts: HashMap, i32> = HashMap::new(); + for mut result in all_given_results { + if(given_results.clone().into_iter().any(|g_r| 
g_r.id == result.id)) { + result.state = ResultState::Inconclusive; } - // Set to valid - set_result_state(&group_result_ids, ResultState::Valid) - .execute(&state.pool) - .await?; - // Invalidate other groups for this task - } else if let Some(canonical_result_id) = task.canonical_result_id - && group_id != canonical_result_id - { - // Invalid - set_result_state(&group_result_ids, ResultState::Invalid) - .execute(&state.pool) - .await?; - } else { - // Inconclusive - set_result_state(&group_result_ids, ResultState::Inconclusive) - .execute(&state.pool) - .await?; - // Find largest group for task - let mut group_id_count: HashMap, i32> = HashMap::new(); - for gr in &group_results { - if let Some(gr_id) = gr.group_result_id { - *group_id_count.entry(gr_id).or_insert(0) += 1 + if(result.state == ResultState::Inconclusive){ + match result.group_result_id { + Some(group_id) => { + *group_counts.entry(group_id).or_insert(0) += 1; + }, + None => {} } } - let inconclusive_and_error_size: i32 = group_results - .iter() - .filter(|result| { - result.state == ResultState::Inconclusive || result.state == ResultState::Error - }) - .count() as i32; - - let largest_group_size: i32 = group_id_count.values().copied().max().unwrap_or(0); - // Set assignments_needed - sqlx::query_unchecked!( - r#" - UPDATE - tasks - SET - assignments_needed = $1 - WHERE - id = $2 - "#, - task.quorum - largest_group_size + inconclusive_and_error_size, - task.id - ) - .execute(&state.pool) - .await?; } + + let largest_inconclusive_group_count = group_counts.into_iter().map(|(_, count)| count).max().expect("It will exist"); + sqlx::query_unchecked!( + r#" + UPDATE + tasks + SET + assignments_needed = $1 + WHERE + id = $2 + "#, + task.quorum - largest_inconclusive_group_count, + task.id + ) + .execute(&state.pool) + .await?; + } else { + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + state = CASE + WHEN group_result_id = $1 THEN 'valid'::result_state + ELSE 'invalid'::result_state + END + WHERE + id = ANY($2) + "#, + valid_group_id, + all_given_results.into_iter().map(|result| result.id).collect::>() + ) + .execute(&state.pool) + .await?; } Ok(()) } From bda4d53a3d3e56633c38f14a3689ac11c9a72c00 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Sun, 30 Nov 2025 21:54:40 -0800 Subject: [PATCH 21/29] formatting/clippy --- server/src/routes/validate_submit.rs | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 503ccc1..314f43b 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -1,13 +1,12 @@ use axum::{Json, extract::State}; use clusterizer_common::{ errors::ValidateSubmitError, - records::{Assignment, Result, Task}, + records::{Result, Task}, requests::ValidateSubmitRequest, types::{Id, ResultState}, }; -use sqlx::types::chrono::{DateTime, Utc}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use crate::{ result::{AppError, AppResult}, @@ -182,19 +181,19 @@ pub async fn validate_submit( if let Some(gid) = group_id { results_by_group_id .entry(*gid) - .or_insert_with(Vec::new) + .or_default() .push(*result_id); } } for (group_id, results) in &results_by_group_id { - if group_id != results.iter().map(|result| result).min().expect("group cannot be empty") { + if group_id != results.iter().min().expect("group cannot be empty") { Err(AppError::Specific(ValidateSubmitError::NondeterministicGroup))? 
} } let mut results_by_result_id: HashMap, Result> = HashMap::new(); - for (result) in all_given_results.clone() { + for result in all_given_results.clone() { results_by_result_id.insert(result.id, result); } @@ -202,7 +201,7 @@ pub async fn validate_submit( match group_id { Some(gid) => { // Validation successful, update group_result_id both locally and in db - results_by_result_id.get_mut(&result_id).unwrap().group_result_id = Some(gid.clone()); + results_by_result_id.get_mut(&result_id).unwrap().group_result_id = Some(gid); sqlx::query_unchecked!( r#" UPDATE @@ -243,7 +242,7 @@ pub async fn validate_submit( .min_by_key(|&(_, earliest)| earliest) .map(|(group_id, _)| group_id); - if valid_group_id == None { + if valid_group_id.is_none() { sqlx::query_unchecked!( r#" UPDATE @@ -260,20 +259,17 @@ pub async fn validate_submit( let mut group_counts: HashMap, i32> = HashMap::new(); for mut result in all_given_results { - if(given_results.clone().into_iter().any(|g_r| g_r.id == result.id)) { + if given_results.clone().into_iter().any(|g_r| g_r.id == result.id) { result.state = ResultState::Inconclusive; } - if(result.state == ResultState::Inconclusive){ - match result.group_result_id { - Some(group_id) => { - *group_counts.entry(group_id).or_insert(0) += 1; - }, - None => {} + if result.state == ResultState::Inconclusive { + if let Some(group_id) = result.group_result_id { + *group_counts.entry(group_id).or_insert(0) += 1; } } } - let largest_inconclusive_group_count = group_counts.into_iter().map(|(_, count)| count).max().expect("It will exist"); + let largest_inconclusive_group_count = group_counts.into_values().max().expect("It will exist"); sqlx::query_unchecked!( r#" UPDATE From 41cd060b4eda1a9cc9094dd81c256d84884b323a Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 1 Dec 2025 11:50:20 -0800 Subject: [PATCH 22/29] Implement fix --- server/src/routes/validate_submit.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 314f43b..1c1eef9 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -262,10 +262,8 @@ pub async fn validate_submit( if given_results.clone().into_iter().any(|g_r| g_r.id == result.id) { result.state = ResultState::Inconclusive; } - if result.state == ResultState::Inconclusive { - if let Some(group_id) = result.group_result_id { + if result.state == ResultState::Inconclusive && let Some(group_id) = result.group_result_id { *group_counts.entry(group_id).or_insert(0) += 1; - } } } From d027e01919f45089566d47ff47c09a24c8710692 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 1 Dec 2025 14:39:47 -0800 Subject: [PATCH 23/29] Some fixes --- common/src/errors/validate_submit_err.rs | 10 +--------- server/example.env | 3 +++ server/migrations/20250426220809_init.sql | 8 +++++--- 3 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 server/example.env diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 42b8459..7b761d3 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -8,15 +8,7 @@ pub enum ValidateSubmitError { #[error("validation group contained results belonging to multiple tasks")] InvalidTaskCount, #[error("results referred to by group id cannot refer to an result other than itself")] - InvalidGroupReference, + NondeterministicGroup, #[error("state transition forbidden")] StateTransitionForbidden, - #[error( - 
"cannot attempt validation without all results relevant to choosing the canonical result" - )] - MissingResults, - #[error( - "validator must choose the earliest group_result_id by created_at date to use for the group" - )] - NondeterministicGroup, } diff --git a/server/example.env b/server/example.env new file mode 100644 index 0000000..93752c0 --- /dev/null +++ b/server/example.env @@ -0,0 +1,3 @@ +DATABASE_URL=postgres://postgres@localhost/clusterizer +CLUSTERIZER_SECRET=balls +CLUSTERIZER_ADDRESS=0.0.0.0:3000 \ No newline at end of file diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index abc8af6..f8d877b 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -31,8 +31,6 @@ CREATE TABLE project_versions ( archive_url text NOT NULL ); - - CREATE TYPE assignment_state AS ENUM ( 'init', 'canceled', @@ -48,6 +46,7 @@ CREATE TYPE result_state AS ENUM ( 'inconclusive', 'error' ); + CREATE TABLE tasks ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), @@ -56,9 +55,10 @@ CREATE TABLE tasks ( stdin text NOT NULL, assignments_needed int4 NOT NULL, assignment_user_ids int8[] NOT NULL DEFAULT ARRAY[]::int8[], - canonical_result_id int8, + canonical_result_id int8 REFERENCES results(id), quorum int4 NOT NULL ); + CREATE TABLE assignments ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), @@ -82,8 +82,10 @@ CREATE TABLE results ( exit_code int4, group_result_id int8 REFERENCES results(id) ON DELETE RESTRICT ON UPDATE RESTRICT ); + ALTER TABLE tasks ADD FOREIGN KEY (canonical_result_id) REFERENCES results(id) ON DELETE RESTRICT ON UPDATE RESTRICT; + CREATE FUNCTION trigger_function_tasks_remove_assignment_user_id() RETURNS TRIGGER AS $$ BEGIN From d073a9212881619ab090ae4d92d5ba195be13461 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 1 Dec 2025 14:43:53 -0800 Subject: [PATCH 24/29] Rename state transition error, other fixes --- common/src/errors/validate_submit_err.rs | 2 +- server/src/routes/validate_fetch.rs | 7 +++---- server/src/routes/validate_submit.rs | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs index 7b761d3..607bcac 100644 --- a/common/src/errors/validate_submit_err.rs +++ b/common/src/errors/validate_submit_err.rs @@ -10,5 +10,5 @@ pub enum ValidateSubmitError { #[error("results referred to by group id cannot refer to an result other than itself")] NondeterministicGroup, #[error("state transition forbidden")] - StateTransitionForbidden, + ForbiddenStateTransition, } diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index d640743..9313cbf 100644 --- a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -48,10 +48,9 @@ pub async fn validate_fetch( t.id HAVING t.project_id = $1 - AND ( - count(a.id) >= t.assignments_needed - OR t.canonical_result_id IS NOT NULL - ) + AND + count(a.id) >= t.assignments_needed + "#, project.id ) diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 1c1eef9..4b41ef4 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -104,7 +104,7 @@ pub async fn validate_submit( .any(|result| result.state != ResultState::Init) { Err(AppError::Specific( - ValidateSubmitError::StateTransitionForbidden, + 
ValidateSubmitError::ForbiddenStateTransition, ))? } @@ -169,7 +169,7 @@ pub async fn validate_submit( .any(|result| result.state == ResultState::Init) { Err(AppError::Specific( - ValidateSubmitError::StateTransitionForbidden, + ValidateSubmitError::ForbiddenStateTransition, ))? } let mut all_given_results = previously_given_results.clone(); From f7662c8d0db773e038b01589ed8340773650fe72 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 1 Dec 2025 14:46:23 -0800 Subject: [PATCH 25/29] Change sql query --- server/src/routes/validate_fetch.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index 9313cbf..8c8f536 100644 --- a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -43,7 +43,7 @@ pub async fn validate_fetch( JOIN assignments a ON a.task_id = t.id WHERE - a.state not in ('canceled', 'init', 'expired') + a.state = 'submitted' GROUP BY t.id HAVING From a56e47d3bd676675ec0ec05129fc7cd3c127f212 Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 1 Dec 2025 14:59:18 -0800 Subject: [PATCH 26/29] Fix sql stuff --- ...a454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json} | 4 ++-- server/migrations/20250426220809_init.sql | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename server/.sqlx/{query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json => query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json} (76%) diff --git a/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json b/server/.sqlx/query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json similarity index 76% rename from server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json rename to server/.sqlx/query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json index bc9e7e2..d1e4d0f 100644 --- a/server/.sqlx/query-9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d.json +++ b/server/.sqlx/query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state not in ('canceled', 'init', 'expired')\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND (\n count(a.id) >= t.assignments_needed\n OR t.canonical_result_id IS NOT NULL\n )\n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state = 'submitted'\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND\n count(a.id) >= t.assignments_needed\n \n ", "describe": { "columns": [ { @@ -66,5 +66,5 @@ false ] }, - "hash": "9ee75dbb5012a5310b8af01a05b6bf6e50cca4edd9b9c2b475201cc208720a2d" + "hash": "0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2" } diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index f8d877b..d5facda 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -55,7 +55,7 @@ CREATE TABLE tasks ( stdin text NOT NULL, assignments_needed int4 NOT NULL, assignment_user_ids int8[] NOT NULL DEFAULT ARRAY[]::int8[], - canonical_result_id int8 REFERENCES results(id), + canonical_result_id int8, quorum int4 NOT NULL ); From 3a70ceb913b19b415711e451e9fd8995697a8aaa Mon Sep 17 00:00:00 2001 From: BoySanic Date: Mon, 1 Dec 2025 15:05:11 -0800 Subject: [PATCH 27/29] Fix format --- 
server/src/routes/validate_submit.rs | 70 +++++++++++++++++++--------- 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 4b41ef4..66001c3 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -122,7 +122,10 @@ pub async fn validate_submit( WHERE a.id = ANY($1) "#, - given_results.iter().map(|result| result.assignment_id).collect::>() + given_results + .iter() + .map(|result| result.assignment_id) + .collect::>() ) .fetch_all(&state.pool) .await?; @@ -159,14 +162,19 @@ pub async fn validate_submit( "#, task.id, last_given_date, - given_results.clone().into_iter().map(|result| result.id).collect::>() - ).fetch_all(&state.pool) + given_results + .clone() + .into_iter() + .map(|result| result.id) + .collect::>() + ) + .fetch_all(&state.pool) .await?; // if the state of any of previously_given_results is 'init': if previously_given_results - .iter() - .any(|result| result.state == ResultState::Init) + .iter() + .any(|result| result.state == ResultState::Init) { Err(AppError::Specific( ValidateSubmitError::ForbiddenStateTransition, @@ -188,7 +196,9 @@ pub async fn validate_submit( for (group_id, results) in &results_by_group_id { if group_id != results.iter().min().expect("group cannot be empty") { - Err(AppError::Specific(ValidateSubmitError::NondeterministicGroup))? + Err(AppError::Specific( + ValidateSubmitError::NondeterministicGroup, + ))? } } @@ -201,7 +211,10 @@ pub async fn validate_submit( match group_id { Some(gid) => { // Validation successful, update group_result_id both locally and in db - results_by_result_id.get_mut(&result_id).unwrap().group_result_id = Some(gid); + results_by_result_id + .get_mut(&result_id) + .unwrap() + .group_result_id = Some(gid); sqlx::query_unchecked!( r#" UPDATE @@ -227,7 +240,6 @@ pub async fn validate_submit( } } - let valid_group_id = results_by_group_id .iter() .filter(|(_, results)| results.len() as i32 >= task.quorum) @@ -241,10 +253,10 @@ pub async fn validate_submit( }) .min_by_key(|&(_, earliest)| earliest) .map(|(group_id, _)| group_id); - + if valid_group_id.is_none() { sqlx::query_unchecked!( - r#" + r#" UPDATE results SET @@ -252,24 +264,35 @@ pub async fn validate_submit( WHERE id = ANY($1) "#, - all_given_results.clone().into_iter().map(|result| result.id).collect::>() + all_given_results + .clone() + .into_iter() + .map(|result| result.id) + .collect::>() ) .execute(&state.pool) .await?; - + let mut group_counts: HashMap, i32> = HashMap::new(); for mut result in all_given_results { - if given_results.clone().into_iter().any(|g_r| g_r.id == result.id) { + if given_results + .clone() + .into_iter() + .any(|g_r| g_r.id == result.id) + { result.state = ResultState::Inconclusive; } - if result.state == ResultState::Inconclusive && let Some(group_id) = result.group_result_id { - *group_counts.entry(group_id).or_insert(0) += 1; + if result.state == ResultState::Inconclusive + && let Some(group_id) = result.group_result_id + { + *group_counts.entry(group_id).or_insert(0) += 1; } } - let largest_inconclusive_group_count = group_counts.into_values().max().expect("It will exist"); + let largest_inconclusive_group_count = + group_counts.into_values().max().expect("It will exist"); sqlx::query_unchecked!( - r#" + r#" UPDATE tasks SET @@ -277,14 +300,14 @@ pub async fn validate_submit( WHERE id = $2 "#, - task.quorum - largest_inconclusive_group_count, - task.id + task.quorum - largest_inconclusive_group_count, + 
task.id ) .execute(&state.pool) .await?; } else { sqlx::query_unchecked!( - r#" + r#" UPDATE results SET @@ -295,8 +318,11 @@ pub async fn validate_submit( WHERE id = ANY($2) "#, - valid_group_id, - all_given_results.into_iter().map(|result| result.id).collect::>() + valid_group_id, + all_given_results + .into_iter() + .map(|result| result.id) + .collect::>() ) .execute(&state.pool) .await?; From fde2af75bf4e9b8ba72c1f85810970d0b75a8586 Mon Sep 17 00:00:00 2001 From: Chen Steenvoorden Date: Wed, 3 Dec 2025 03:29:47 +0100 Subject: [PATCH 28/29] Refactor --- common/src/errors/mod.rs | 4 +- common/src/errors/validate_submit_err.rs | 14 - common/src/errors/validate_submit_error.rs | 16 + .../src/requests/validate_submit_request.rs | 2 +- common/src/types/assignment_state.rs | 1 - ...7b2e6b5a31199b622f28ca3692fe60eec53c9.json | 6 +- ...413c5de92cbebeca249d519a43074862bc98e.json | 14 - ...af3351a6629a942b732ccf6740b2e683eedae.json | 3 +- ...c4971bd7f593a6bcec201970fe5ae9e120e7f.json | 15 + ...b11432326d631d24b5ebaea76946aa913df1.json} | 4 +- ...4c8f9c35818097734bd5ef90fe507cad77786.json | 15 - ...fd0e1d578432b0ec081732c79d49d843abfe.json} | 6 +- ...75a630db700a2adf7a8a61426105f5128f491.json | 15 - ...636256031600e6a52fc94e57880b40e306b2b.json | 14 + ...1c9875f8443a0982e81d1538161656074183.json} | 4 +- ...c5602d215db4e5cf61484cf7b80c84b0cfe5c.json | 3 +- ...faf7ba6f659832bd727aa8faa69fe761254d1.json | 3 +- ...dad562d8eecde82db258dc183868b211f4410.json | 15 + ...2e6fd3fabcc164935906cd974dabd677ec14f.json | 15 + ...0dbd31ba71d4b7d938f3435ac964fffa9c17f.json | 15 - server/example.env | 2 +- server/migrations/20250426220809_init.sql | 31 +- server/src/routes/fetch_tasks.rs | 2 +- server/src/routes/validate_fetch.rs | 13 +- server/src/routes/validate_submit.rs | 349 +++++++----------- server/src/util/set_assignment_state.rs | 2 +- server/src/util/set_result_state.rs | 2 +- 27 files changed, 250 insertions(+), 335 deletions(-) delete mode 100644 common/src/errors/validate_submit_err.rs create mode 100644 common/src/errors/validate_submit_error.rs delete mode 100644 server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json create mode 100644 server/.sqlx/query-37247bf346c477389245f83ce58c4971bd7f593a6bcec201970fe5ae9e120e7f.json rename server/.sqlx/{query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json => query-37787d0e5dbb0fd034a68efbe9eeb11432326d631d24b5ebaea76946aa913df1.json} (74%) delete mode 100644 server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json rename server/.sqlx/{query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json => query-5008175ade98bc9947127cbdfb3cfd0e1d578432b0ec081732c79d49d843abfe.json} (80%) delete mode 100644 server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json create mode 100644 server/.sqlx/query-758af5d6a249187a308a1746d22636256031600e6a52fc94e57880b40e306b2b.json rename server/.sqlx/{query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json => query-cd5ffd3d5d60bb7c7072db520c4d1c9875f8443a0982e81d1538161656074183.json} (84%) create mode 100644 server/.sqlx/query-dd2ccc411586e044836d4f5c9d3dad562d8eecde82db258dc183868b211f4410.json create mode 100644 server/.sqlx/query-f0f7b39962dd47d3a523564bbe52e6fd3fabcc164935906cd974dabd677ec14f.json delete mode 100644 server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json diff --git a/common/src/errors/mod.rs b/common/src/errors/mod.rs index 
27fa704..825a820 100644 --- a/common/src/errors/mod.rs +++ b/common/src/errors/mod.rs @@ -4,7 +4,7 @@ pub mod not_found; pub mod register_error; pub mod submit_result_error; pub mod validate_fetch_error; -pub mod validate_submit_err; +pub mod validate_submit_error; pub use fetch_tasks_error::FetchTasksError; pub use infallible::Infallible; @@ -12,4 +12,4 @@ pub use not_found::NotFound; pub use register_error::RegisterError; pub use submit_result_error::SubmitResultError; pub use validate_fetch_error::ValidateFetchError; -pub use validate_submit_err::ValidateSubmitError; +pub use validate_submit_error::ValidateSubmitError; diff --git a/common/src/errors/validate_submit_err.rs b/common/src/errors/validate_submit_err.rs deleted file mode 100644 index 607bcac..0000000 --- a/common/src/errors/validate_submit_err.rs +++ /dev/null @@ -1,14 +0,0 @@ -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -#[derive(Clone, Hash, Debug, Serialize, Deserialize, Error)] -pub enum ValidateSubmitError { - #[error("invalid result given")] - InvalidResult, - #[error("validation group contained results belonging to multiple tasks")] - InvalidTaskCount, - #[error("results referred to by group id cannot refer to an result other than itself")] - NondeterministicGroup, - #[error("state transition forbidden")] - ForbiddenStateTransition, -} diff --git a/common/src/errors/validate_submit_error.rs b/common/src/errors/validate_submit_error.rs new file mode 100644 index 0000000..1c1e8dd --- /dev/null +++ b/common/src/errors/validate_submit_error.rs @@ -0,0 +1,16 @@ +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Clone, Hash, Debug, Serialize, Deserialize, Error)] +pub enum ValidateSubmitError { + #[error("invalid result")] + InvalidResult, + #[error("expected results for exactly one task")] + InvalidTaskCount, + #[error("the group id of all results in a group must be the first submitted result")] + InconsistentGroup, + #[error("forbidden state transition")] + ForbiddenStateTransition, + #[error("missing some results for this task")] + MissingResults, +} diff --git a/common/src/requests/validate_submit_request.rs b/common/src/requests/validate_submit_request.rs index 0603a3e..9688528 100644 --- a/common/src/requests/validate_submit_request.rs +++ b/common/src/requests/validate_submit_request.rs @@ -6,6 +6,6 @@ use crate::{records::Result, types::Id}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ValidateSubmitRequest { - // First id is the assignment id that will change state, second is the "group id" it belongs with + // Map from result id to group id. None means error. 
pub results: HashMap, Option>>, } diff --git a/common/src/types/assignment_state.rs b/common/src/types/assignment_state.rs index 742bddf..3f7338c 100644 --- a/common/src/types/assignment_state.rs +++ b/common/src/types/assignment_state.rs @@ -11,5 +11,4 @@ pub enum AssignmentState { Canceled, Expired, Submitted, - Error, } diff --git a/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json b/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json index 2ad6a3a..ee9c8a1 100644 --- a/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json +++ b/server/.sqlx/query-0a85c57626456d79f6a57c607437b2e6b5a31199b622f28ca3692fe60eec53c9.json @@ -39,8 +39,7 @@ "init", "canceled", "expired", - "submitted", - "error" + "submitted" ] } } @@ -59,8 +58,7 @@ "init", "canceled", "expired", - "submitted", - "error" + "submitted" ] } } diff --git a/server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json b/server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json deleted file mode 100644 index df083c3..0000000 --- a/server/.sqlx/query-0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n results\n SET\n state = 'inconclusive'\n WHERE\n id = ANY($1)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "0addfba67e093d0471b35ff5647413c5de92cbebeca249d519a43074862bc98e" -} diff --git a/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json b/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json index f64dee6..ffa6665 100644 --- a/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json +++ b/server/.sqlx/query-1260e6fd3c1f30f651d1f86bf86af3351a6629a942b732ccf6740b2e683eedae.json @@ -39,8 +39,7 @@ "init", "canceled", "expired", - "submitted", - "error" + "submitted" ] } } diff --git a/server/.sqlx/query-37247bf346c477389245f83ce58c4971bd7f593a6bcec201970fe5ae9e120e7f.json b/server/.sqlx/query-37247bf346c477389245f83ce58c4971bd7f593a6bcec201970fe5ae9e120e7f.json new file mode 100644 index 0000000..8f8b328 --- /dev/null +++ b/server/.sqlx/query-37247bf346c477389245f83ce58c4971bd7f593a6bcec201970fe5ae9e120e7f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n state = CASE\n WHEN group_result_id = $1 THEN 'valid'::result_state\n ELSE 'invalid'::result_state\n END\n WHERE\n id = ANY($2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "37247bf346c477389245f83ce58c4971bd7f593a6bcec201970fe5ae9e120e7f" +} diff --git a/server/.sqlx/query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json b/server/.sqlx/query-37787d0e5dbb0fd034a68efbe9eeb11432326d631d24b5ebaea76946aa913df1.json similarity index 74% rename from server/.sqlx/query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json rename to server/.sqlx/query-37787d0e5dbb0fd034a68efbe9eeb11432326d631d24b5ebaea76946aa913df1.json index d1e4d0f..d3b1b7e 100644 --- a/server/.sqlx/query-0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2.json +++ b/server/.sqlx/query-37787d0e5dbb0fd034a68efbe9eeb11432326d631d24b5ebaea76946aa913df1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - 
"query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.state = 'submitted'\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND\n count(a.id) >= t.assignments_needed\n \n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n LEFT JOIN results r ON\n r.assignment_id = a.id\n AND r.state = 'init'\n WHERE\n a.state = 'submitted'\n GROUP BY\n t.id\n HAVING\n t.project_id = $1\n AND count(a.id) >= t.assignments_needed\n AND count(r.id) > 0\n ", "describe": { "columns": [ { @@ -66,5 +66,5 @@ false ] }, - "hash": "0e3b84e9511aa454ff7e7027985c67ce20eadfc6affb26e814af406c2a64f9d2" + "hash": "37787d0e5dbb0fd034a68efbe9eeb11432326d631d24b5ebaea76946aa913df1" } diff --git a/server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json b/server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json deleted file mode 100644 index d8cdaf8..0000000 --- a/server/.sqlx/query-420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n results\n SET\n group_result_id = $1\n WHERE\n id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "420f3df90dfb4025ef667bfac5d4c8f9c35818097734bd5ef90fe507cad77786" -} diff --git a/server/.sqlx/query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json b/server/.sqlx/query-5008175ade98bc9947127cbdfb3cfd0e1d578432b0ec081732c79d49d843abfe.json similarity index 80% rename from server/.sqlx/query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json rename to server/.sqlx/query-5008175ade98bc9947127cbdfb3cfd0e1d578432b0ec081732c79d49d843abfe.json index c9b997d..d052858 100644 --- a/server/.sqlx/query-eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528.json +++ b/server/.sqlx/query-5008175ade98bc9947127cbdfb3cfd0e1d578432b0ec081732c79d49d843abfe.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n r.*\n FROM\n results r\n JOIN\n assignments a\n ON\n a.id = r.assignment_id\n WHERE\n a.task_id = $1\n AND\n r.created_at < $2\n AND\n r.id != ALL($3)\n ", + "query": "\n SELECT\n r.*\n FROM\n results r\n JOIN assignments a ON\n a.id = r.assignment_id\n WHERE\n a.task_id = $1\n AND r.id < $2\n AND r.id != ALL($3)\n ", "describe": { "columns": [ { @@ -60,7 +60,7 @@ "parameters": { "Left": [ "Int8", - "Timestamptz", + "Int8", "Int8Array" ] }, @@ -75,5 +75,5 @@ true ] }, - "hash": "eb0fd36557a9764cbc763863ffee6ba929e36d7c2de7931437acc6779cfae528" + "hash": "5008175ade98bc9947127cbdfb3cfd0e1d578432b0ec081732c79d49d843abfe" } diff --git a/server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json b/server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json deleted file mode 100644 index 8d97e1b..0000000 --- a/server/.sqlx/query-62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n tasks\n SET\n assignments_needed = $1\n WHERE\n id = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "62f56598373964d2870469ac45775a630db700a2adf7a8a61426105f5128f491" -} diff --git a/server/.sqlx/query-758af5d6a249187a308a1746d22636256031600e6a52fc94e57880b40e306b2b.json 
b/server/.sqlx/query-758af5d6a249187a308a1746d22636256031600e6a52fc94e57880b40e306b2b.json new file mode 100644 index 0000000..4facc00 --- /dev/null +++ b/server/.sqlx/query-758af5d6a249187a308a1746d22636256031600e6a52fc94e57880b40e306b2b.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n state = 'inconclusive'\n WHERE\n id = ANY($1)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "758af5d6a249187a308a1746d22636256031600e6a52fc94e57880b40e306b2b" +} diff --git a/server/.sqlx/query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json b/server/.sqlx/query-cd5ffd3d5d60bb7c7072db520c4d1c9875f8443a0982e81d1538161656074183.json similarity index 84% rename from server/.sqlx/query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json rename to server/.sqlx/query-cd5ffd3d5d60bb7c7072db520c4d1c9875f8443a0982e81d1538161656074183.json index 31600a3..9ea0108 100644 --- a/server/.sqlx/query-167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09.json +++ b/server/.sqlx/query-cd5ffd3d5d60bb7c7072db520c4d1c9875f8443a0982e81d1538161656074183.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN\n assignments a\n ON\n a.task_id = t.id\n WHERE\n a.id = ANY($1)\n ", + "query": "\n SELECT\n t.*\n FROM\n tasks t\n JOIN assignments a ON\n a.task_id = t.id\n WHERE\n a.id = ANY($1)\n ", "describe": { "columns": [ { @@ -66,5 +66,5 @@ false ] }, - "hash": "167feb9a08a6cded140b1f2b38e9105d29e17356e756167e11b129d2b93bcf09" + "hash": "cd5ffd3d5d60bb7c7072db520c4d1c9875f8443a0982e81d1538161656074183" } diff --git a/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json b/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json index 3c00628..e351a95 100644 --- a/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json +++ b/server/.sqlx/query-ce2921487afc54738ea394ab248c5602d215db4e5cf61484cf7b80c84b0cfe5c.json @@ -39,8 +39,7 @@ "init", "canceled", "expired", - "submitted", - "error" + "submitted" ] } } diff --git a/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json b/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json index 4ab1e03..78957cc 100644 --- a/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json +++ b/server/.sqlx/query-d3f83d3bf9b010cdf4a5c8c65b2faf7ba6f659832bd727aa8faa69fe761254d1.json @@ -13,8 +13,7 @@ "init", "canceled", "expired", - "submitted", - "error" + "submitted" ] } } diff --git a/server/.sqlx/query-dd2ccc411586e044836d4f5c9d3dad562d8eecde82db258dc183868b211f4410.json b/server/.sqlx/query-dd2ccc411586e044836d4f5c9d3dad562d8eecde82db258dc183868b211f4410.json new file mode 100644 index 0000000..4a4102a --- /dev/null +++ b/server/.sqlx/query-dd2ccc411586e044836d4f5c9d3dad562d8eecde82db258dc183868b211f4410.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n results\n SET\n group_result_id = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "dd2ccc411586e044836d4f5c9d3dad562d8eecde82db258dc183868b211f4410" +} diff --git a/server/.sqlx/query-f0f7b39962dd47d3a523564bbe52e6fd3fabcc164935906cd974dabd677ec14f.json 
b/server/.sqlx/query-f0f7b39962dd47d3a523564bbe52e6fd3fabcc164935906cd974dabd677ec14f.json new file mode 100644 index 0000000..5b8d773 --- /dev/null +++ b/server/.sqlx/query-f0f7b39962dd47d3a523564bbe52e6fd3fabcc164935906cd974dabd677ec14f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n tasks\n SET\n assignments_needed = $1\n WHERE\n id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "f0f7b39962dd47d3a523564bbe52e6fd3fabcc164935906cd974dabd677ec14f" +} diff --git a/server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json b/server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json deleted file mode 100644 index 6a22411..0000000 --- a/server/.sqlx/query-fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n results\n SET\n state = CASE\n WHEN group_result_id = $1 THEN 'valid'::result_state\n ELSE 'invalid'::result_state\n END\n WHERE\n id = ANY($2)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8Array" - ] - }, - "nullable": [] - }, - "hash": "fea73eec47c800d67d5a49fb5da0dbd31ba71d4b7d938f3435ac964fffa9c17f" -} diff --git a/server/example.env b/server/example.env index 93752c0..6fe42cd 100644 --- a/server/example.env +++ b/server/example.env @@ -1,3 +1,3 @@ DATABASE_URL=postgres://postgres@localhost/clusterizer CLUSTERIZER_SECRET=balls -CLUSTERIZER_ADDRESS=0.0.0.0:3000 \ No newline at end of file +CLUSTERIZER_ADDRESS=0.0.0.0:3000 diff --git a/server/migrations/20250426220809_init.sql b/server/migrations/20250426220809_init.sql index d5facda..2ef73e5 100644 --- a/server/migrations/20250426220809_init.sql +++ b/server/migrations/20250426220809_init.sql @@ -31,22 +31,6 @@ CREATE TABLE project_versions ( archive_url text NOT NULL ); -CREATE TYPE assignment_state AS ENUM ( - 'init', - 'canceled', - 'expired', - 'submitted', - 'error' -); - -CREATE TYPE result_state AS ENUM ( - 'init', - 'valid', - 'invalid', - 'inconclusive', - 'error' -); - CREATE TABLE tasks ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), @@ -59,6 +43,13 @@ CREATE TABLE tasks ( quorum int4 NOT NULL ); +CREATE TYPE assignment_state AS ENUM ( + 'init', + 'canceled', + 'expired', + 'submitted' +); + CREATE TABLE assignments ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), @@ -72,6 +63,14 @@ CREATE UNIQUE INDEX assignments_task_id_user_id_key ON assignments (task_id, user_id) WHERE state != 'canceled' AND state != 'expired'; +CREATE TYPE result_state AS ENUM ( + 'init', + 'valid', + 'invalid', + 'inconclusive', + 'error' +); + CREATE TABLE results ( id int8 GENERATED ALWAYS AS IDENTITY NOT NULL PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT now(), diff --git a/server/src/routes/fetch_tasks.rs b/server/src/routes/fetch_tasks.rs index d75b28f..03d346e 100644 --- a/server/src/routes/fetch_tasks.rs +++ b/server/src/routes/fetch_tasks.rs @@ -28,7 +28,7 @@ pub async fn fetch_tasks( WHERE id = ANY($1) "#, - request.project_ids + request.project_ids, ) .fetch_all(&mut *tx) .await?; diff --git a/server/src/routes/validate_fetch.rs b/server/src/routes/validate_fetch.rs index 8c8f536..dd99c0a 100644 --- a/server/src/routes/validate_fetch.rs +++ b/server/src/routes/validate_fetch.rs @@ -27,7 +27,7 @@ pub async fn 
validate_fetch( WHERE id = $1 "#, - project_id + project_id, ) .fetch_one(&state.pool) .await @@ -42,19 +42,22 @@ pub async fn validate_fetch( tasks t JOIN assignments a ON a.task_id = t.id + LEFT JOIN results r ON + r.assignment_id = a.id + AND r.state = 'init' WHERE a.state = 'submitted' GROUP BY t.id HAVING t.project_id = $1 - AND - count(a.id) >= t.assignments_needed - + AND count(a.id) >= t.assignments_needed + AND count(r.id) > 0 "#, - project.id + project.id, ) .fetch_all(&state.pool) .await?; + Ok(Json(tasks)) } diff --git a/server/src/routes/validate_submit.rs b/server/src/routes/validate_submit.rs index 66001c3..bf9d260 100644 --- a/server/src/routes/validate_submit.rs +++ b/server/src/routes/validate_submit.rs @@ -3,7 +3,7 @@ use clusterizer_common::{ errors::ValidateSubmitError, records::{Result, Task}, requests::ValidateSubmitRequest, - types::{Id, ResultState}, + types::ResultState, }; use std::collections::HashMap; @@ -18,69 +18,10 @@ pub async fn validate_submit( State(state): State, Json(request): Json, ) -> AppResult<(), ValidateSubmitError> { - /* - given_results = fetch all results of given result_ids + // Fetch results from the request. + let result_ids: Vec<_> = request.results.keys().collect(); - if given_results.len() != result_ids.len(): - error - - if the state of any of given_results is not 'init': - error - - tasks = fetch all tasks of given_results - - if tasks.len() != 1: - error - - last_date = max(given_results submitted date) - - previously_given_results = fetch all results submitted before last_date and not in given_results - - if the state of any of previously_given_results is 'init': - error - - if the group id of the result with the same id as any given group id is not equal to that same group id: - error - - error_results = [] - - for result, group_id in given_results: - if group_id is not None: - update group id of result - result.group_id = group_id - else: - error_results.append(result) - result.state = 'error' - - update state of error_results - - all_results = given_results + previously_given_results - - results_by_group_id = you know how to make this - - valid_group_id = group_id of the group with at least quorum reuslts that has the earliest submitted result - - if valid_group_id is None: - set result state of all results in results_by_group_id to 'inconslusive' - update assignments_needed - else: - UPDATE - results - SET - state = (group_id = $1 ? 'valid' : 'invalid') - WHERE - id = ANY($2) - - # for group_id, group_result_ids in results_by_group_id: - # if group_id == valid_group_id: - # set result state to 'valid' - # else: - # set result state to 'invalid' - - */ - let result_ids: Vec<_> = request.results.keys().cloned().collect(); - - let given_results: Vec = sqlx::query_as_unchecked!( + let results = sqlx::query_as_unchecked!( Result, r#" SELECT @@ -95,11 +36,13 @@ pub async fn validate_submit( .fetch_all(&state.pool) .await?; - if given_results.len() != request.results.len() { + // Check all result ids were valid. + if results.len() != request.results.len() { Err(AppError::Specific(ValidateSubmitError::InvalidResult))? } - if given_results + // Check all results have the 'init' state. + if results .iter() .any(|result| result.state != ResultState::Init) { @@ -108,224 +51,198 @@ pub async fn validate_submit( ))? } - let given_tasks: Vec = sqlx::query_as_unchecked!( + // Fetch tasks for the results we are going to validate. 
+ let assignment_ids: Vec<_> = results.iter().map(|result| result.assignment_id).collect(); + + let tasks = sqlx::query_as_unchecked!( Task, r#" SELECT t.* FROM tasks t - JOIN - assignments a - ON - a.task_id = t.id + JOIN assignments a ON + a.task_id = t.id WHERE a.id = ANY($1) "#, - given_results - .iter() - .map(|result| result.assignment_id) - .collect::>() + assignment_ids, ) .fetch_all(&state.pool) .await?; - if given_tasks.len() != 1 { + // Can only validate one task at a time, for now. + if tasks.len() != 1 { Err(AppError::Specific(ValidateSubmitError::InvalidTaskCount))? } - let task = &given_tasks[0]; - let last_given_date = given_results - .clone() - .iter() - .map(|result| result.created_at) + let task = &tasks[0]; + + // Fetch the remaining results for this task. This ignores results whose id exceeds the largest + // id from the validation request, because the validator program also did not consider them. + let last_result_id = request + .results + .keys() .max() - .expect("There will be a created_at because of the db schema"); + .expect("results cannot be empty"); - let previously_given_results: Vec = sqlx::query_as_unchecked!( + let previous_results = sqlx::query_as_unchecked!( Result, r#" SELECT r.* FROM results r - JOIN - assignments a - ON - a.id = r.assignment_id + JOIN assignments a ON + a.id = r.assignment_id WHERE a.task_id = $1 - AND - r.created_at < $2 - AND - r.id != ALL($3) + AND r.id < $2 + AND r.id != ALL($3) "#, task.id, - last_given_date, - given_results - .clone() - .into_iter() - .map(|result| result.id) - .collect::>() + last_result_id, + result_ids, ) .fetch_all(&state.pool) .await?; - // if the state of any of previously_given_results is 'init': - if previously_given_results + // Check the validator didn't miss any tasks. This is needed for deterministic validation. + if previous_results .iter() .any(|result| result.state == ResultState::Init) { - Err(AppError::Specific( - ValidateSubmitError::ForbiddenStateTransition, - ))? + Err(AppError::Specific(ValidateSubmitError::MissingResults))? } - let mut all_given_results = previously_given_results.clone(); - all_given_results.extend(given_results.clone()); - let mut results_by_group_id: HashMap, Vec>> = HashMap::new(); - // Build results_by_group_id - for (result_id, group_id) in &request.results { - if let Some(gid) = group_id { - results_by_group_id - .entry(*gid) - .or_default() - .push(*result_id); + // Build groups and errored results. + let mut groups: HashMap<_, Vec<_>> = HashMap::new(); + let mut error_result_ids = Vec::new(); + + for result in &previous_results { + if let Some(group_id) = result.group_result_id { + groups.entry(group_id).or_default().push(result.id); } } - for (group_id, results) in &results_by_group_id { - if group_id != results.iter().min().expect("group cannot be empty") { - Err(AppError::Specific( - ValidateSubmitError::NondeterministicGroup, - ))? + for (&result_id, &group_id) in &request.results { + if let Some(group_id) = group_id { + groups.entry(group_id).or_default().push(result_id); + } else { + error_result_ids.push(result_id); } } - let mut results_by_result_id: HashMap, Result> = HashMap::new(); - for result in all_given_results.clone() { - results_by_result_id.insert(result.id, result); + // Check that each group id is the lowest of any result ids in the group. + for (group_id, result_ids) in &groups { + if group_id != result_ids.iter().min().expect("group cannot be empty") { + Err(AppError::Specific(ValidateSubmitError::InconsistentGroup))? 
+ } } - for (result_id, group_id) in request.results { - match group_id { - Some(gid) => { - // Validation successful, update group_result_id both locally and in db - results_by_result_id - .get_mut(&result_id) - .unwrap() - .group_result_id = Some(gid); - sqlx::query_unchecked!( - r#" - UPDATE - results - SET - group_result_id = $1 - WHERE - id = $2 - "#, - Some(gid.clone()), - result_id - ) - .execute(&state.pool) - .await?; - } - None => { - // Validation unsuccessful, update state to error - results_by_result_id.get_mut(&result_id).unwrap().state = ResultState::Error; - set_result_state(&[result_id], ResultState::Error) - .execute(&state.pool) - .await?; - } + // Update state of error results. + set_result_state(&error_result_ids, ResultState::Error) + .execute(&state.pool) + .await?; + + // Update group ids. + for (&result_id, &group_id) in &request.results { + if let Some(group_id) = group_id { + sqlx::query_unchecked!( + r#" + UPDATE + results + SET + group_result_id = $1 + WHERE + id = $2 + "#, + group_id, + result_id, + ) + .execute(&state.pool) + .await?; } } - let valid_group_id = results_by_group_id + // Find the id of a group that meets quorum, if any. When multiple groups meet quorum, we + // select the one with the lowest id instead of the largest group. This is needed for + // deterministic validation. + let valid_group_id = groups .iter() .filter(|(_, results)| results.len() as i32 >= task.quorum) - .map(|(&group_id, results)| { - let earliest = results - .iter() - .map(|r| results_by_result_id[r].created_at) - .min() - .expect("groups are never empty"); - (group_id, earliest) - }) - .min_by_key(|&(_, earliest)| earliest) - .map(|(group_id, _)| group_id); + .map(|(&group_id, _)| group_id) + .min(); + + if let Some(valid_group_id) = valid_group_id { + // If there was a valid group, update the state of all results. + let group_result_ids: Vec<_> = groups.values().flatten().collect(); - if valid_group_id.is_none() { sqlx::query_unchecked!( r#" - UPDATE - results - SET - state = 'inconclusive' - WHERE - id = ANY($1) - "#, - all_given_results - .clone() - .into_iter() - .map(|result| result.id) - .collect::>() + UPDATE + results + SET + state = CASE + WHEN group_result_id = $1 THEN 'valid'::result_state + ELSE 'invalid'::result_state + END + WHERE + id = ANY($2) + "#, + valid_group_id, + group_result_ids, ) .execute(&state.pool) .await?; + } else { + // Otherwise, update the state of the new results to 'inconclusive'. + let inconclusive_result_ids: Vec<_> = request + .results + .iter() + .filter(|(_, group_id)| group_id.is_some()) + .map(|(result_id, _)| result_id) + .collect(); - let mut group_counts: HashMap, i32> = HashMap::new(); - for mut result in all_given_results { - if given_results - .clone() - .into_iter() - .any(|g_r| g_r.id == result.id) - { - result.state = ResultState::Inconclusive; - } - if result.state == ResultState::Inconclusive - && let Some(group_id) = result.group_result_id - { - *group_counts.entry(group_id).or_insert(0) += 1; - } - } - - let largest_inconclusive_group_count = - group_counts.into_values().max().expect("It will exist"); sqlx::query_unchecked!( r#" - UPDATE - tasks - SET - assignments_needed = $1 - WHERE - id = $2 - "#, - task.quorum - largest_inconclusive_group_count, - task.id + UPDATE + results + SET + state = 'inconclusive' + WHERE + id = ANY($1) + "#, + inconclusive_result_ids, ) .execute(&state.pool) .await?; - } else { + + // Finally, update the number of assignments needed. 
+ let largest_inconclusive_group = groups + .values() + .max_by_key(|results| results.len()) + .expect("there is at least one group"); + + let assignments_needed = (results.len() + previous_results.len() + - largest_inconclusive_group.len()) as i32 + + task.quorum; + sqlx::query_unchecked!( r#" - UPDATE - results - SET - state = CASE - WHEN group_result_id = $1 THEN 'valid'::result_state - ELSE 'invalid'::result_state - END - WHERE - id = ANY($2) - "#, - valid_group_id, - all_given_results - .into_iter() - .map(|result| result.id) - .collect::>() + UPDATE + tasks + SET + assignments_needed = $1 + WHERE + id = $2 + "#, + assignments_needed, + task.id, ) .execute(&state.pool) .await?; } + Ok(()) } diff --git a/server/src/util/set_assignment_state.rs b/server/src/util/set_assignment_state.rs index aeb1c3f..54d891c 100644 --- a/server/src/util/set_assignment_state.rs +++ b/server/src/util/set_assignment_state.rs @@ -19,6 +19,6 @@ pub fn set_assignment_state( id = ANY($2) "#, assignment_state, - assignment_ids + assignment_ids, ) } diff --git a/server/src/util/set_result_state.rs b/server/src/util/set_result_state.rs index 9e90d83..75cdf49 100644 --- a/server/src/util/set_result_state.rs +++ b/server/src/util/set_result_state.rs @@ -16,6 +16,6 @@ pub fn set_result_state(result_ids: &[Id], result_state: ResultState) -> id = ANY($2) "#, result_state, - result_ids + result_ids, ) } From 646948fa86219869a57871474d95ac705a6161e2 Mon Sep 17 00:00:00 2001 From: Chen Steenvoorden Date: Sat, 6 Dec 2025 21:15:51 +0100 Subject: [PATCH 29/29] Add search parameters --- common/src/records/result.rs | 12 +++++++++++ common/src/records/task.rs | 6 ++++++ ...02f2e0e01a252ef1aa79e45da16db3a15fad.json} | 5 +++-- ...d435b8f6ef317eec2981cf24b97d9a1394ed.json} | 21 ++++++++++++++++--- server/src/util/select.rs | 6 ++++++ 5 files changed, 45 insertions(+), 5 deletions(-) rename server/.sqlx/{query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json => query-ab022e8774fc4cef4434e9ba30b902f2e0e01a252ef1aa79e45da16db3a15fad.json} (86%) rename server/.sqlx/{query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json => query-d87fa0f565013412bab63b399f7dd435b8f6ef317eec2981cf24b97d9a1394ed.json} (71%) diff --git a/common/src/records/result.rs b/common/src/records/result.rs index 3d497dc..343ee43 100644 --- a/common/src/records/result.rs +++ b/common/src/records/result.rs @@ -21,6 +21,8 @@ pub struct Result { #[derive(Clone, Hash, Debug, Default, Serialize, Deserialize)] pub struct ResultFilter { pub assignment_id: Option>, + pub group_result_id: Option>, + pub state: Option, } impl ResultFilter { @@ -28,4 +30,14 @@ impl ResultFilter { self.assignment_id = Some(assignment_id); self } + + pub fn group_result_id(mut self, group_result_id: Id) -> Self { + self.group_result_id = Some(group_result_id); + self + } + + pub fn state(mut self, state: ResultState) -> Self { + self.state = Some(state); + self + } } diff --git a/common/src/records/task.rs b/common/src/records/task.rs index 18cc4f0..7964b9d 100644 --- a/common/src/records/task.rs +++ b/common/src/records/task.rs @@ -22,6 +22,7 @@ pub struct Task { #[derive(Clone, Hash, Debug, Default, Serialize, Deserialize)] pub struct TaskFilter { pub project_id: Option>, + pub canonical_result_id: Option>, } impl TaskFilter { @@ -29,4 +30,9 @@ impl TaskFilter { self.project_id = Some(project_id); self } + + pub fn canonical_result_id(mut self, canonical_result_id: Id) -> Self { + self.canonical_result_id = Some(canonical_result_id); + self + } } 
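// A minimal sketch of how the search parameters added in this patch might be used
// from a client or admin tool. The builder methods come from the ResultFilter and
// TaskFilter changes above; the exact re-export paths and the endpoint that
// ultimately consumes these filters are assumptions, not shown in this series.
// A field left as None is bound as NULL on the server, and the generated
// predicates ("column = $n IS NOT FALSE", or "column = $n OR $n IS NULL" for the
// nullable group_result_id) then place no constraint on that column.
use clusterizer_common::{
    records::{Result, ResultFilter, TaskFilter},
    types::ResultState,
};

/// Results that were judged valid and belong to the same group as `canonical`.
fn valid_results_in_group(canonical: &Result) -> ResultFilter {
    ResultFilter::default()
        .group_result_id(canonical.id)
        .state(ResultState::Valid)
}

/// Tasks whose canonical result is `canonical`.
fn tasks_with_canonical_result(canonical: &Result) -> TaskFilter {
    TaskFilter::default().canonical_result_id(canonical.id)
}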
diff --git a/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json b/server/.sqlx/query-ab022e8774fc4cef4434e9ba30b902f2e0e01a252ef1aa79e45da16db3a15fad.json similarity index 86% rename from server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json rename to server/.sqlx/query-ab022e8774fc4cef4434e9ba30b902f2e0e01a252ef1aa79e45da16db3a15fad.json index eb6a480..73478a5 100644 --- a/server/.sqlx/query-0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051.json +++ b/server/.sqlx/query-ab022e8774fc4cef4434e9ba30b902f2e0e01a252ef1aa79e45da16db3a15fad.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n tasks\n WHERE\n project_id = $1 IS NOT FALSE\n ", + "query": "\n SELECT\n *\n FROM\n tasks\n WHERE\n project_id = $1 IS NOT FALSE\n AND (canonical_result_id = $2 OR $2 IS NULL)\n ", "describe": { "columns": [ { @@ -51,6 +51,7 @@ ], "parameters": { "Left": [ + "Int8", "Int8" ] }, @@ -66,5 +67,5 @@ false ] }, - "hash": "0d6d91c4e0acb78e4fda4df1804d430966e8ae83a168de0d3444bb8a4c7b1051" + "hash": "ab022e8774fc4cef4434e9ba30b902f2e0e01a252ef1aa79e45da16db3a15fad" } diff --git a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json b/server/.sqlx/query-d87fa0f565013412bab63b399f7dd435b8f6ef317eec2981cf24b97d9a1394ed.json similarity index 71% rename from server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json rename to server/.sqlx/query-d87fa0f565013412bab63b399f7dd435b8f6ef317eec2981cf24b97d9a1394ed.json index 61e1852..b16c3f7 100644 --- a/server/.sqlx/query-9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c.json +++ b/server/.sqlx/query-d87fa0f565013412bab63b399f7dd435b8f6ef317eec2981cf24b97d9a1394ed.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n results\n WHERE\n assignment_id = $1 IS NOT FALSE\n ", + "query": "\n SELECT\n *\n FROM\n results\n WHERE\n assignment_id = $1 IS NOT FALSE\n AND (group_result_id = $2 OR $2 IS NULL)\n AND state = $3 IS NOT FALSE\n ", "describe": { "columns": [ { @@ -59,7 +59,22 @@ ], "parameters": { "Left": [ - "Int8" + "Int8", + "Int8", + { + "Custom": { + "name": "result_state", + "kind": { + "Enum": [ + "init", + "valid", + "invalid", + "inconclusive", + "error" + ] + } + } + } ] }, "nullable": [ @@ -73,5 +88,5 @@ true ] }, - "hash": "9d2127cb05e7631e969e289ff57ffd170787045c3233daad045204b9aa53f66c" + "hash": "d87fa0f565013412bab63b399f7dd435b8f6ef317eec2981cf24b97d9a1394ed" } diff --git a/server/src/util/select.rs b/server/src/util/select.rs index 755692c..43a500b 100644 --- a/server/src/util/select.rs +++ b/server/src/util/select.rs @@ -122,8 +122,10 @@ impl Select for Task { tasks WHERE project_id = $1 IS NOT FALSE + AND (canonical_result_id = $2 OR $2 IS NULL) "#, filter.project_id, + filter.canonical_result_id, ) } @@ -172,8 +174,12 @@ impl Select for Result { results WHERE assignment_id = $1 IS NOT FALSE + AND (group_result_id = $2 OR $2 IS NULL) + AND state = $3 IS NOT FALSE "#, filter.assignment_id, + filter.group_result_id, + filter.state, ) }
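// A minimal sketch of the client side of the validate_submit endpoint added in
// this series. How a validator decides that two results agree is project-specific
// (comparing stdout here is only an illustration), and FetchedResult plus the i64
// ids are hypothetical stand-ins for the real Result and Id types. The hard
// requirements visible in the server code are: every submitted result maps either
// to Some(group id) or to None (meaning the result errored), and the group id must
// be the lowest result id in its group.
use std::collections::HashMap;

struct FetchedResult {
    id: i64,
    stdout: Option<String>, // None: the result could not be validated
}

fn build_validate_submit_map(results: &[FetchedResult]) -> HashMap<i64, Option<i64>> {
    let mut groups: HashMap<&str, Vec<i64>> = HashMap::new();
    let mut request: HashMap<i64, Option<i64>> = HashMap::new();

    for result in results {
        match &result.stdout {
            // Results with identical output end up in the same group.
            Some(output) => groups.entry(output.as_str()).or_default().push(result.id),
            // A result that failed validation maps to None.
            None => {
                request.insert(result.id, None);
            }
        }
    }

    for ids in groups.values() {
        // The server checks that the group id is the lowest result id in the group.
        let group_id = *ids.iter().min().expect("group cannot be empty");
        for &id in ids {
            request.insert(id, Some(group_id));
        }
    }

    request
}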