diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c715c165e143b..c0f461b9dc854 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -115,3 +115,42 @@ updates: update-types: - minor - patch +- package-ecosystem: maven + directory: /test/lang/java + schedule: + interval: daily + open-pull-requests-limit: 50 + labels: [A-dependencies] + allow: + - dependency-name: "org.postgresql:postgresql" +- package-ecosystem: nuget + directory: /test/lang/csharp/csharp-npgsql.csproj + schedule: + interval: daily + open-pull-requests-limit: 50 + labels: [A-dependencies] + allow: + - dependency-name: "npgsql" +- package-ecosystem: npm + directory: /test/lang/js + schedule: + interval: daily + open-pull-requests-limit: 50 + labels: [A-dependencies] + allow: + - dependency-name: "pg" + - dependency-name: "@types/pg" +- package-ecosystem: pip + directory: /test/lang/python + schedule: + interval: daily + open-pull-requests-limit: 50 + labels: [A-dependencies] +- package-ecosystem: bundler + directory: /test/lang/ruby + schedule: + interval: daily + open-pull-requests-limit: 50 + labels: [A-dependencies] + allow: + - dependency-name: "pg" diff --git a/Cargo.lock b/Cargo.lock index f829b9251f154..675b0bb54e374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -488,6 +488,18 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-native-tls" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d57d4cec3c647232e1094dc013546c0b33ce785d8aeb251e1f20dfaf8a9a13fe" +dependencies = [ + "futures-util", + "native-tls", + "thiserror 1.0.69", + "url", +] + [[package]] name = "async-process" version = "2.3.0" @@ -5112,7 +5124,7 @@ dependencies = [ [[package]] name = "mz-balancerd" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "anyhow", "async-trait", @@ -5251,7 +5263,7 @@ dependencies = [ [[package]] name = "mz-catalog-debug" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ 
"anyhow", "clap", @@ -5392,6 +5404,7 @@ dependencies = [ "tracing", "tracing-subscriber", "turmoil", + "uuid", "workspace-hack", ] @@ -5415,12 +5428,13 @@ dependencies = [ "tokio-stream", "tonic", "tonic-build", + "uuid", "workspace-hack", ] [[package]] name = "mz-clusterd" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "anyhow", "axum", @@ -5744,7 +5758,7 @@ dependencies = [ [[package]] name = "mz-environmentd" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "anyhow", "askama", @@ -6252,7 +6266,7 @@ dependencies = [ [[package]] name = "mz-materialized" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "mz-clusterd", "mz-environmentd", @@ -6450,7 +6464,7 @@ dependencies = [ [[package]] name = "mz-orchestratord" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "anyhow", "async-trait", @@ -6634,7 +6648,7 @@ dependencies = [ [[package]] name = "mz-persist-client" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "anyhow", "arrayvec 0.7.6", @@ -7766,7 +7780,7 @@ dependencies = [ [[package]] name = "mz-testdrive" -version = "0.147.0-dev.0" +version = "0.147.13" dependencies = [ "anyhow", "arrow", @@ -11071,8 +11085,9 @@ dependencies = [ [[package]] name = "tiberius" version = "0.12.3" -source = "git+https://github.com/MaterializeInc/tiberius?rev=555076bc80087d21addb929948508c4e1a3c1fa7#555076bc80087d21addb929948508c4e1a3c1fa7" +source = "git+https://github.com/MaterializeInc/tiberius?rev=64ca594cc22ed67d072c2d0110455da50539e1cd#64ca594cc22ed67d072c2d0110455da50539e1cd" dependencies = [ + "async-native-tls", "async-trait", "asynchronous-codec", "byteorder", diff --git a/Cargo.toml b/Cargo.toml index bcc3df1cff2f2..10ba907ca45b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -341,7 +341,7 @@ reqwest-retry = { git = "https://github.com/MaterializeInc/reqwest-middleware.gi # Need to upstream a few PRs related to test builders. 
# # Note: All changes in our fork of tiberius should be pushed to the `mz_changes` branch. -tiberius = { git = "https://github.com/MaterializeInc/tiberius", rev = "555076bc80087d21addb929948508c4e1a3c1fa7" } +tiberius = { git = "https://github.com/MaterializeInc/tiberius", rev="64ca594cc22ed67d072c2d0110455da50539e1cd" } # BEGIN LINT CONFIG # DO NOT EDIT. Automatically generated by bin/gen-lints. diff --git a/LICENSE b/LICENSE index cb8b0126f6455..d95e92b1ba92e 100644 --- a/LICENSE +++ b/LICENSE @@ -13,7 +13,7 @@ Business Source License 1.1 Licensor: Materialize, Inc. -Licensed Work: Materialize Version 20250620 +Licensed Work: Materialize Version v0.147.13 The Licensed Work is © 2025 Materialize, Inc. Additional Use Grant: Within a single installation of the Licensed Work, diff --git a/ci/plugins/cloudtest/hooks/pre-exit b/ci/plugins/cloudtest/hooks/pre-exit index b63d1fc4b4b57..13e1f2285d81c 100755 --- a/ci/plugins/cloudtest/hooks/pre-exit +++ b/ci/plugins/cloudtest/hooks/pre-exit @@ -117,7 +117,7 @@ sudo journalctl --merge --since "$(cat step_start_timestamp)" > journalctl-merge mapfile -t artifacts < <(printf "run.log\nkubectl-get-logs.log\nkubectl-get-logs-previous.log\nkubectl-get-events.log\nkubectl-get-all.log\nkubectl-describe-all.log\nkubectl-pods-with-nodes.log\nkubectl-get-events-kube-system.log\nkubectl-get-all-kube-system.log\nkubectl-describe-all-kube-system.log\njournalctl-merge.log\nkail-output.log\ntrufflehog.log\n"; find . 
-name 'junit_*.xml') artifacts_str=$(IFS=";"; echo "${artifacts[*]}") -bin/ci-builder run stable trufflehog --no-update --no-verification --json --exclude-detectors=coda,dockerhub,box,npmtoken filesystem "${artifacts[@]}" | trufflehog_jq_filter_logs > trufflehog.log +bin/ci-builder run stable trufflehog --no-update --no-verification --json --exclude-detectors=coda,dockerhub,box,npmtoken,github filesystem "${artifacts[@]}" | trufflehog_jq_filter_logs > trufflehog.log unset CI_EXTRA_ARGS # We don't want extra args for the annotation # Continue even if ci-annotate-errors fails diff --git a/ci/plugins/mzcompose/hooks/pre-exit b/ci/plugins/mzcompose/hooks/pre-exit index 9ee6e4c823420..0067d70eeb168 100755 --- a/ci/plugins/mzcompose/hooks/pre-exit +++ b/ci/plugins/mzcompose/hooks/pre-exit @@ -90,7 +90,7 @@ mapfile -t artifacts < <(printf "run.log\nservices.log\njournalctl-merge.log\nne artifacts_str=$(IFS=";"; echo "${artifacts[*]}") echo "--- Running trufflehog to scan artifacts for secrets" -bin/ci-builder run stable trufflehog --no-update --no-verification --json --exclude-detectors=coda,dockerhub,box,npmtoken filesystem "${artifacts[@]}" | trufflehog_jq_filter_logs > trufflehog.log +bin/ci-builder run stable trufflehog --no-update --no-verification --json --exclude-detectors=coda,dockerhub,box,npmtoken,github filesystem "${artifacts[@]}" | trufflehog_jq_filter_logs > trufflehog.log echo "Uploading log artifacts" unset CI_EXTRA_ARGS # We don't want extra args for the annotation diff --git a/ci/publish-helm-charts/pipeline.template.yml b/ci/publish-helm-charts/pipeline.template.yml index 4bd7b1c6bf2eb..0c708e69da343 100644 --- a/ci/publish-helm-charts/pipeline.template.yml +++ b/ci/publish-helm-charts/pipeline.template.yml @@ -29,6 +29,7 @@ steps: composition: terraform run: aws-temporary args: ["--tag=$CI_MZ_VERSION", --no-run-testdrive-files] + ci-builder: stable - id: terraform-gcp-tag label: "Terraform + Helm Chart E2E on GCP (tagged)" @@ -43,6 +44,7 @@ steps: 
composition: terraform run: gcp-temporary args: ["--tag=$CI_MZ_VERSION", --no-run-testdrive-files] + ci-builder: stable - id: terraform-azure-tag label: "Terraform + Helm Chart E2E on Azure (tagged)" @@ -57,6 +59,7 @@ steps: composition: terraform run: azure-temporary args: ["--tag=$CI_MZ_VERSION", --no-run-testdrive-files] + ci-builder: stable - wait: ~ diff --git a/misc/helm-charts/operator/Chart.yaml b/misc/helm-charts/operator/Chart.yaml index 1431786f08cae..06e98943b5924 100644 --- a/misc/helm-charts/operator/Chart.yaml +++ b/misc/helm-charts/operator/Chart.yaml @@ -12,6 +12,6 @@ name: materialize-operator description: Materialize Kubernetes Operator Helm Chart type: application version: v25.2.0-beta.1 -appVersion: v0.147.0-dev.0 +appVersion: v0.147.13 icon: https://materialize.com/favicon.ico home: https://materialize.com diff --git a/misc/helm-charts/operator/README.md b/misc/helm-charts/operator/README.md index 2ba5320cad234..60e1e2b7ee490 100644 --- a/misc/helm-charts/operator/README.md +++ b/misc/helm-charts/operator/README.md @@ -1,6 +1,6 @@ # Materialize Kubernetes Operator Helm Chart -![Version: v25.2.0-beta.1](https://img.shields.io/badge/Version-v25.2.0--beta.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.147.0-dev.0](https://img.shields.io/badge/AppVersion-v0.147.0--dev.0-informational?style=flat-square) +![Version: v25.2.0-beta.1](https://img.shields.io/badge/Version-v25.2.0--beta.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.147.13](https://img.shields.io/badge/AppVersion-v0.147.13-informational?style=flat-square) Materialize Kubernetes Operator Helm Chart @@ -151,7 +151,7 @@ The following table lists the configurable parameters of the Materialize operato | `operator.clusters.defaultSizes.system` | | ``"25cc"`` | | 
`operator.image.pullPolicy` | Policy for pulling the image: "IfNotPresent" avoids unnecessary re-pulling of images | ``"IfNotPresent"`` | | `operator.image.repository` | The Docker repository for the operator image | ``"materialize/orchestratord"`` | -| `operator.image.tag` | The tag/version of the operator image to be used | ``"v0.146.0"`` | +| `operator.image.tag` | The tag/version of the operator image to be used | ``"v0.147.13"`` | | `operator.nodeSelector` | Node selector to use for the operator pod | ``nil`` | | `operator.resources.limits` | Resource limits for the operator's CPU and memory | ``{"memory":"512Mi"}`` | | `operator.resources.requests` | Resources requested by the operator for CPU and memory | ``{"cpu":"100m","memory":"512Mi"}`` | @@ -177,7 +177,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm ```shell helm install my-materialize-operator \ - --set operator.image.tag=v0.147.0-dev.0 \ + --set operator.image.tag=v0.147.13 \ materialize/materialize-operator ``` @@ -212,7 +212,7 @@ metadata: name: 12345678-1234-1234-1234-123456789012 namespace: materialize-environment spec: - environmentdImageRef: materialize/environmentd:v0.147.0-dev.0 + environmentdImageRef: materialize/environmentd:v0.147.13 backendSecretName: materialize-backend environmentdResourceRequirements: limits: diff --git a/misc/helm-charts/operator/templates/deployment.yaml b/misc/helm-charts/operator/templates/deployment.yaml index a26c158fa04de..e6e5aa5ae50f2 100644 --- a/misc/helm-charts/operator/templates/deployment.yaml +++ b/misc/helm-charts/operator/templates/deployment.yaml @@ -25,6 +25,10 @@ spec: labels: {{- include "materialize-operator.selectorLabels" . | nindent 8 }} spec: + securityContext: + fsGroup: 999 + runAsGroup: 999 + runAsUser: 999 serviceAccountName: {{ include "materialize-operator.serviceAccountName" . 
}} {{- if .Values.operator.nodeSelector }} nodeSelector: @@ -81,7 +85,7 @@ spec: {{- if .Values.console.enabled }} - "--create-console" {{- end }} - - "--console-image-tag-default=25.2.1" + - "--console-image-tag-default=25.2.3" {{- range $key, $value := .Values.console.imageTagMapOverride }} - "--console-image-tag-map={{ $key }}={{ $value }}" {{- end }} @@ -222,3 +226,12 @@ spec: {{- end }} resources: {{- toYaml .Values.operator.resources | nindent 10 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/misc/helm-charts/operator/tests/deployment_test.yaml b/misc/helm-charts/operator/tests/deployment_test.yaml index 3addde632de0f..9321fa2644887 100644 --- a/misc/helm-charts/operator/tests/deployment_test.yaml +++ b/misc/helm-charts/operator/tests/deployment_test.yaml @@ -17,7 +17,7 @@ tests: of: Deployment - equal: path: spec.template.spec.containers[0].image - value: materialize/orchestratord:v0.146.0 + value: materialize/orchestratord:v0.147.13 - equal: path: spec.template.spec.containers[0].imagePullPolicy value: IfNotPresent diff --git a/misc/helm-charts/operator/values.yaml b/misc/helm-charts/operator/values.yaml index 583ad72f85c6a..e4d2395e09640 100644 --- a/misc/helm-charts/operator/values.yaml +++ b/misc/helm-charts/operator/values.yaml @@ -13,7 +13,7 @@ operator: # -- The Docker repository for the operator image repository: materialize/orchestratord # -- The tag/version of the operator image to be used - tag: v0.146.0 + tag: v0.147.13 # -- Policy for pulling the image: "IfNotPresent" avoids unnecessary re-pulling of images pullPolicy: IfNotPresent diff --git a/misc/helm-charts/testing/materialize.yaml b/misc/helm-charts/testing/materialize.yaml index 1187adb3bac91..f10ca81960930 100644 --- a/misc/helm-charts/testing/materialize.yaml +++ b/misc/helm-charts/testing/materialize.yaml @@ -28,7 +28,7 @@ metadata: name: 
12345678-1234-1234-1234-123456789012 namespace: materialize-environment spec: - environmentdImageRef: materialize/environmentd:v0.147.0-dev.0 + environmentdImageRef: materialize/environmentd:v0.147.13 backendSecretName: materialize-backend authenticatorKind: None #balancerdExternalCertificateSpec: diff --git a/misc/images/materialized-base/Dockerfile b/misc/images/materialized-base/Dockerfile index 6f70a151d62e7..a83e97e9bf76f 100644 --- a/misc/images/materialized-base/Dockerfile +++ b/misc/images/materialized-base/Dockerfile @@ -15,7 +15,7 @@ # deployed to production, but the version needs to be bumped whenever features # that the console depends upon are removed (to a version of the console that # doesn't depend on those features). -FROM materialize/console:25.2.1 AS console +FROM materialize/console:25.2.3 AS console MZFROM ubuntu-base diff --git a/misc/python/materialize/cli/ci_annotate_errors.py b/misc/python/materialize/cli/ci_annotate_errors.py index 13d0ed1787419..28617cb5bd06f 100644 --- a/misc/python/materialize/cli/ci_annotate_errors.py +++ b/misc/python/materialize/cli/ci_annotate_errors.py @@ -189,6 +189,7 @@ PRODUCT_LIMITS_FIND_IGNORE_RE = re.compile( rb""" ( Memory\ cgroup\ out\ of\ memory + | [Oo]ut\ [Oo]f\ [Mm]emory | limits-materialized .* \| .* fatal\ runtime\ error:\ stack\ overflow | limits-materialized .* \| .* has\ overflowed\ its\ stack ) diff --git a/misc/python/materialize/cli/helm_chart_version_bump.py b/misc/python/materialize/cli/helm_chart_version_bump.py index 037341ea3624e..8c9a7a52c92ed 100644 --- a/misc/python/materialize/cli/helm_chart_version_bump.py +++ b/misc/python/materialize/cli/helm_chart_version_bump.py @@ -39,6 +39,7 @@ def main() -> int: yaml = YAML() yaml.preserve_quotes = True + yaml.width = 4096 # Don't introduce line breaks mods = [ ( diff --git a/misc/python/materialize/cloudtest/k8s/environmentd.py b/misc/python/materialize/cloudtest/k8s/environmentd.py index 50e7eebe10018..44282e952ba66 100644 --- 
a/misc/python/materialize/cloudtest/k8s/environmentd.py +++ b/misc/python/materialize/cloudtest/k8s/environmentd.py @@ -363,6 +363,7 @@ def env_vars(self) -> list[V1EnvVar]: ) env = [ + V1EnvVar(name="MZ_TEST_ONLY_DUMMY_SEGMENT_CLIENT", value="true"), V1EnvVar(name="MZ_SOFT_ASSERTIONS", value="1"), V1EnvVar(name="MZ_POD_NAME", value_from=value_from), V1EnvVar(name="AWS_REGION", value="minio"), diff --git a/misc/python/materialize/mzcompose/services/materialized.py b/misc/python/materialize/mzcompose/services/materialized.py index 3fc825578b177..551a8876c5839 100644 --- a/misc/python/materialize/mzcompose/services/materialized.py +++ b/misc/python/materialize/mzcompose/services/materialized.py @@ -114,6 +114,7 @@ def __init__( environment = [ "MZ_NO_TELEMETRY=1", + "MZ_TEST_ONLY_DUMMY_SEGMENT_CLIENT=true", f"MZ_SOFT_ASSERTIONS={int(soft_assertions)}", # The following settings can not be baked in the default image, as they # are enabled for testing purposes only diff --git a/misc/python/materialize/mzcompose/services/sql_server.py b/misc/python/materialize/mzcompose/services/sql_server.py index 516ff54d61585..cf63c7da234ce 100644 --- a/misc/python/materialize/mzcompose/services/sql_server.py +++ b/misc/python/materialize/mzcompose/services/sql_server.py @@ -23,8 +23,10 @@ def __init__( # lowercase letters, base-10 digits and/or non-alphanumeric symbols. 
sa_password: str = DEFAULT_SA_PASSWORD, name: str = "sql-server", - image: str = "mcr.microsoft.com/mssql/server", + # 2017 mssql images core on OSx, 2019 is the earliest that has passed + image: str = "mcr.microsoft.com/mssql/server:2019-CU32-ubuntu-20.04", environment_extra: list[str] = [], + volumes_extra: list[str] = [], ) -> None: super().__init__( name=name, @@ -34,6 +36,7 @@ def __init__( # See See https://github.com/microsoft/mssql-docker/issues/802 for current status "platform": "linux/amd64", "ports": [1433], + "volumes": volumes_extra, "environment": [ "ACCEPT_EULA=Y", "MSSQL_PID=Developer", diff --git a/misc/python/materialize/sqlsmith.py b/misc/python/materialize/sqlsmith.py index f1c3e04e740f0..61068abf45ad7 100644 --- a/misc/python/materialize/sqlsmith.py +++ b/misc/python/materialize/sqlsmith.py @@ -135,4 +135,5 @@ "exceeded recursion limit of 2048", "key cannot be null", # expected, see PR materialize#25941 "regexp_extract must specify at least one capture group", + "array_fill with arrays not yet supported", ] diff --git a/misc/shlib/shlib.bash b/misc/shlib/shlib.bash index 1be0160af8ce8..7a1cfe8b894f4 100644 --- a/misc/shlib/shlib.bash +++ b/misc/shlib/shlib.bash @@ -297,6 +297,8 @@ trufflehog_jq_filter_logs() { (.Raw | contains("jdbc:postgresql://127.0.0") | not) and (.Raw | contains("jdbc:postgresql://cockroach") | not) and (.Raw | contains("materialize:materialize") | not) and + (.Raw | contains("ExpirationReaper") | not) and + (.Raw | contains("u1@example.com") | not) and .Raw != "[REDACTED]" )' } diff --git a/src/adapter-types/src/dyncfgs.rs b/src/adapter-types/src/dyncfgs.rs index fb1eee3368461..caacc30bba694 100644 --- a/src/adapter-types/src/dyncfgs.rs +++ b/src/adapter-types/src/dyncfgs.rs @@ -114,7 +114,7 @@ pub const ENABLE_EXPRESSION_CACHE: Config = Config::new( /// Whether we allow sources in multi-replica clusters. 
pub const ENABLE_MULTI_REPLICA_SOURCES: Config = Config::new( "enable_multi_replica_sources", - false, + true, "Enable multi-replica sources.", ); diff --git a/src/adapter/src/catalog/transact.rs b/src/adapter/src/catalog/transact.rs index 9ac433c631e23..a5c77f20222ae 100644 --- a/src/adapter/src/catalog/transact.rs +++ b/src/adapter/src/catalog/transact.rs @@ -45,8 +45,8 @@ use mz_repr::{CatalogItemId, ColumnName, ColumnType, Diff, GlobalId, strconv}; use mz_sql::ast::RawDataType; use mz_sql::catalog::{ CatalogDatabase, CatalogError as SqlCatalogError, CatalogItem as SqlCatalogItem, CatalogRole, - CatalogSchema, DefaultPrivilegeAclItem, DefaultPrivilegeObject, RoleAttributes, RoleMembership, - RoleVars, + CatalogSchema, DefaultPrivilegeAclItem, DefaultPrivilegeObject, PasswordAction, + RoleAttributesRaw, RoleMembership, RoleVars, }; use mz_sql::names::{ CommentObjectId, DatabaseId, FullItemName, ObjectId, QualifiedItemName, @@ -82,7 +82,8 @@ pub enum Op { AlterRole { id: RoleId, name: String, - attributes: RoleAttributes, + attributes: RoleAttributesRaw, + nopassword: bool, vars: RoleVars, }, AlterNetworkPolicy { @@ -109,7 +110,7 @@ pub enum Op { }, CreateRole { name: String, - attributes: RoleAttributes, + attributes: RoleAttributesRaw, }, CreateCluster { id: ClusterId, @@ -659,14 +660,23 @@ impl Catalog { id, name, attributes, + nopassword, vars, } => { state.ensure_not_reserved_role(&id)?; let mut existing_role = state.get_role(&id).clone(); - existing_role.attributes = attributes; + let password = attributes.password.clone(); + existing_role.attributes = attributes.into(); existing_role.vars = vars; - tx.update_role(id, existing_role.into())?; + let password_action = if nopassword { + PasswordAction::Clear + } else if let Some(password) = password { + PasswordAction::Set(password) + } else { + PasswordAction::NoChange + }; + tx.update_role(id, existing_role.into(), password_action)?; CatalogState::add_to_audit_log( &state.system_configuration, @@ -1587,7 
+1597,7 @@ impl Catalog { } let mut member_role = state.get_role(&member_id).clone(); member_role.membership.map.insert(role_id, grantor_id); - tx.update_role(member_id, member_role.into())?; + tx.update_role(member_id, member_role.into(), PasswordAction::NoChange)?; CatalogState::add_to_audit_log( &state.system_configuration, @@ -1617,7 +1627,7 @@ impl Catalog { state.ensure_grantable_role(&role_id)?; let mut member_role = state.get_role(&member_id).clone(); member_role.membership.map.remove(&role_id); - tx.update_role(member_id, member_role.into())?; + tx.update_role(member_id, member_role.into(), PasswordAction::NoChange)?; CatalogState::add_to_audit_log( &state.system_configuration, diff --git a/src/adapter/src/client.rs b/src/adapter/src/client.rs index 6c1a06d21087f..e9b0fdeb83a10 100644 --- a/src/adapter/src/client.rs +++ b/src/adapter/src/client.rs @@ -402,11 +402,24 @@ Issue a SQL query to get started. Need help? session_client .declare(EMPTY_PORTAL.into(), stmt, sql.to_string()) .await?; + match session_client .execute(EMPTY_PORTAL.into(), futures::future::pending(), None) .await? { - (ExecuteResponse::SendingRowsStreaming { rows, .. }, _) => Ok(rows), + (ExecuteResponse::SendingRowsStreaming { mut rows, .. }, _) => { + // We have to only drop the session client _after_ we read the + // result. Otherwise the peek will get cancelled right when we + // drop the session client. So we wrap it up in an extra stream + // like this, which owns the client and can return it. + let owning_response_stream = async_stream::stream! 
{ + while let Some(rows) = rows.next().await { + yield rows; + } + drop(session_client); + }; + Ok(Box::pin(owning_response_stream)) + } r => bail!("unsupported response type: {r:?}"), } } diff --git a/src/adapter/src/coord.rs b/src/adapter/src/coord.rs index 7734be867cc06..d79578a263687 100644 --- a/src/adapter/src/coord.rs +++ b/src/adapter/src/coord.rs @@ -1907,6 +1907,8 @@ impl Coordinator { self.controller.create_replica( instance.id, replica.replica_id, + instance.name.clone(), + replica.name.clone(), role, replica.config.clone(), enable_worker_core_affinity, diff --git a/src/adapter/src/coord/command_handler.rs b/src/adapter/src/coord/command_handler.rs index 860f15eae7824..20f71fc5c0df9 100644 --- a/src/adapter/src/coord/command_handler.rs +++ b/src/adapter/src/coord/command_handler.rs @@ -33,7 +33,7 @@ use mz_sql::ast::{ AlterConnectionAction, AlterConnectionStatement, AlterSourceAction, AstInfo, ConstantVisitor, CopyRelation, CopyStatement, CreateSourceOptionName, Raw, Statement, SubscribeStatement, }; -use mz_sql::catalog::RoleAttributes; +use mz_sql::catalog::RoleAttributesRaw; use mz_sql::names::{Aug, PartialItemName, ResolvedIds}; use mz_sql::plan::{ AbortTransactionPlan, CommitTransactionPlan, CreateRolePlan, Params, Plan, @@ -396,7 +396,7 @@ impl Coordinator { // This includes preventing any user, except a pre-defined set of system users, from // connecting to an internal port. Therefore it's ok to always create a new role for the // user. 
- let attributes = RoleAttributes::new(); + let attributes = RoleAttributesRaw::new(); let plan = CreateRolePlan { name: user.name.to_string(), attributes, diff --git a/src/adapter/src/coord/sequencer/inner.rs b/src/adapter/src/coord/sequencer/inner.rs index e20728718e128..568080061f10f 100644 --- a/src/adapter/src/coord/sequencer/inner.rs +++ b/src/adapter/src/coord/sequencer/inner.rs @@ -54,7 +54,7 @@ use mz_sql::ast::{CreateSubsourceStatement, MySqlConfigOptionName, UnresolvedIte use mz_sql::catalog::{ CatalogCluster, CatalogClusterReplica, CatalogDatabase, CatalogError, CatalogItem as SqlCatalogItem, CatalogItemType, CatalogRole, CatalogSchema, CatalogTypeDetails, - ErrorMessageObjectDescription, ObjectType, RoleAttributes, RoleVars, SessionCatalog, + ErrorMessageObjectDescription, ObjectType, RoleAttributesRaw, RoleVars, SessionCatalog, }; use mz_sql::names::{ Aug, ObjectId, QualifiedItemName, ResolvedDatabaseSpecifier, ResolvedIds, ResolvedItemName, @@ -994,7 +994,7 @@ impl Coordinator { } /// Validates the role attributes for a `CREATE ROLE` statement. - fn validate_role_attributes(&self, attributes: &RoleAttributes) -> Result<(), AdapterError> { + fn validate_role_attributes(&self, attributes: &RoleAttributesRaw) -> Result<(), AdapterError> { if !ENABLE_PASSWORD_AUTH.get(self.catalog().system_config().dyncfgs()) { if attributes.superuser.is_some() || attributes.password.is_some() @@ -3443,9 +3443,13 @@ impl Coordinator { let mut notices = vec![]; // Get the attributes and variables from the role, as they currently are. - let mut attributes = role.attributes().clone(); + let mut attributes: RoleAttributesRaw = role.attributes().clone().into(); let mut vars = role.vars().clone(); + // Whether to set the password to NULL. This is a special case since the existing + // password is not stored in the role attributes. + let mut nopassword = false; + // Apply our updates. 
match option { PlannedAlterRoleOption::Attributes(attrs) => { @@ -3468,7 +3472,7 @@ impl Coordinator { } if attrs.nopassword.unwrap_or(false) { - attributes.password = None; + nopassword = true; } if let Some(notice) = self.should_emit_rbac_notice(session) { @@ -3534,6 +3538,7 @@ impl Coordinator { id, name, attributes, + nopassword, vars: RoleVars { map: vars }, }; let response = self diff --git a/src/adapter/src/coord/sequencer/inner/cluster.rs b/src/adapter/src/coord/sequencer/inner/cluster.rs index 880ae7a3fc247..dad13e088e657 100644 --- a/src/adapter/src/coord/sequencer/inner/cluster.rs +++ b/src/adapter/src/coord/sequencer/inner/cluster.rs @@ -941,9 +941,13 @@ impl Coordinator { ) .expect("creating cluster must not fail"); - let replica_ids: Vec<_> = cluster.replicas().map(|r| r.replica_id).collect(); - for replica_id in replica_ids { - self.create_cluster_replica(cluster_id, replica_id).await; + let replica_ids: Vec<_> = cluster + .replicas() + .map(|r| (r.replica_id, format!("{}.{}", cluster.name(), &r.name))) + .collect(); + for (replica_id, replica_name) in replica_ids { + self.create_cluster_replica(cluster_id, replica_id, replica_name) + .await; } if !introspection_source_ids.is_empty() { @@ -1074,12 +1078,18 @@ impl Coordinator { .resolve_replica_in_cluster(&cluster_id, &name) .expect("just created") .replica_id(); - self.create_cluster_replica(cluster_id, id).await; + + self.create_cluster_replica(cluster_id, id, name).await; Ok(ExecuteResponse::CreatedClusterReplica) } - async fn create_cluster_replica(&mut self, cluster_id: ClusterId, replica_id: ReplicaId) { + async fn create_cluster_replica( + &mut self, + cluster_id: ClusterId, + replica_id: ReplicaId, + replica_name: String, + ) { let cluster = self.catalog().get_cluster(cluster_id); let role = cluster.role(); let replica_config = cluster @@ -1095,6 +1105,8 @@ impl Coordinator { .create_replica( cluster_id, replica_id, + cluster.name.to_owned(), + replica_name, role, replica_config, 
enable_worker_core_affinity, @@ -1292,7 +1304,7 @@ impl Coordinator { NeedsFinalization::No => { ops.push(catalog::Op::UpdateClusterConfig { id: cluster_id, - name, + name: name.clone(), config: new_config, }); } @@ -1305,7 +1317,8 @@ impl Coordinator { .resolve_replica_in_cluster(&cluster_id, &replica_name) .expect("just created") .replica_id(); - self.create_cluster_replica(cluster_id, replica_id).await; + self.create_cluster_replica(cluster_id, replica_id, replica_name) + .await; } Ok(finalization_needed) } diff --git a/src/adapter/src/coord/validity.rs b/src/adapter/src/coord/validity.rs index 15f95234f11f3..48ec3cffcd2db 100644 --- a/src/adapter/src/coord/validity.rs +++ b/src/adapter/src/coord/validity.rs @@ -180,7 +180,7 @@ mod tests { use mz_ore::{assert_contains, assert_ok}; use mz_repr::role_id::RoleId; use mz_repr::{CatalogItemId, Timestamp}; - use mz_sql::catalog::RoleAttributes; + use mz_sql::catalog::RoleAttributesRaw; use mz_sql::session::metadata::SessionMetadata; use uuid::Uuid; @@ -208,7 +208,7 @@ mod tests { None, vec![Op::CreateRole { name: role.into(), - attributes: RoleAttributes::new(), + attributes: RoleAttributesRaw::new(), }], ) .await diff --git a/src/balancerd/BUILD.bazel b/src/balancerd/BUILD.bazel index 351838ab3bb34..b5809729f1568 100644 --- a/src/balancerd/BUILD.bazel +++ b/src/balancerd/BUILD.bazel @@ -32,7 +32,7 @@ rust_library( proc_macro_deps = [] + all_crate_deps(proc_macro = True), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/alloc:mz_alloc", "//src/alloc-default:mz_alloc_default", @@ -74,7 +74,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/alloc:mz_alloc", "//src/alloc-default:mz_alloc_default", @@ -144,7 +144,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_balancerd", "//src/alloc:mz_alloc", @@ -187,7 +187,7 @@ rust_binary( 
"@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_balancerd", "//src/alloc:mz_alloc", diff --git a/src/balancerd/Cargo.toml b/src/balancerd/Cargo.toml index 6a21dc6b7c6eb..429d3241998c8 100644 --- a/src/balancerd/Cargo.toml +++ b/src/balancerd/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-balancerd" description = "Balancer service." -version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/catalog-debug/BUILD.bazel b/src/catalog-debug/BUILD.bazel index a047d6a77fed4..fc51af9345cc6 100644 --- a/src/catalog-debug/BUILD.bazel +++ b/src/catalog-debug/BUILD.bazel @@ -34,7 +34,7 @@ rust_binary( "@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", diff --git a/src/catalog-debug/Cargo.toml b/src/catalog-debug/Cargo.toml index abd02aa492a14..25730826cf6bb 100644 --- a/src/catalog-debug/Cargo.toml +++ b/src/catalog-debug/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-catalog-debug" description = "Durable metadata storage debug tool." 
-version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/catalog/src/builtin.rs b/src/catalog/src/builtin.rs index 8cac03eaf0512..6e7b16865998d 100644 --- a/src/catalog/src/builtin.rs +++ b/src/catalog/src/builtin.rs @@ -42,9 +42,10 @@ use mz_repr::namespaces::{ }; use mz_repr::role_id::RoleId; use mz_repr::{RelationDesc, RelationType, ScalarType}; +use mz_sql::catalog::RoleAttributesRaw; use mz_sql::catalog::{ CatalogItemType, CatalogType, CatalogTypeDetails, CatalogTypePgMetadata, NameReference, - ObjectType, RoleAttributes, SystemObjectType, TypeReference, + ObjectType, SystemObjectType, TypeReference, }; use mz_sql::rbac; use mz_sql::session::user::{ @@ -288,7 +289,7 @@ pub struct BuiltinRole { /// IMPORTANT: Must start with a prefix from [`BUILTIN_PREFIXES`]. pub name: &'static str, pub oid: u32, - pub attributes: RoleAttributes, + pub attributes: RoleAttributesRaw, } #[derive(Clone, Debug)] @@ -6756,6 +6757,7 @@ pub static PG_CLASS_ALL_DATABASES: LazyLock = LazyLock::new(|| { .with_column("relhasindex", ScalarType::Bool.nullable(false)) .with_column("relpersistence", ScalarType::PgLegacyChar.nullable(false)) .with_column("relkind", ScalarType::String.nullable(true)) + .with_column("relnatts", ScalarType::Int16.nullable(false)) .with_column("relchecks", ScalarType::Int16.nullable(false)) .with_column("relhasrules", ScalarType::Bool.nullable(false)) .with_column("relhastriggers", ScalarType::Bool.nullable(false)) @@ -6797,6 +6799,14 @@ SELECT WHEN class_objects.type = 'view' THEN 'v' WHEN class_objects.type = 'materialized-view' THEN 'm' END relkind, + COALESCE( + ( + SELECT count(*)::pg_catalog.int2 + FROM mz_catalog.mz_columns + WHERE mz_columns.id = class_objects.id + ), + 0::pg_catalog.int2 + ) AS relnatts, -- MZ doesn't support CHECK constraints so relchecks is filled with 0 0::pg_catalog.int2 AS relchecks, -- MZ doesn't support creating rules so relhasrules is filled with 
false @@ -6841,7 +6851,8 @@ ON mz_internal.pg_class_all_databases (relname)", is_retained_metrics_object: false, }; -pub static PG_CLASS: LazyLock = LazyLock::new(|| BuiltinView { +pub static PG_CLASS: LazyLock = LazyLock::new(|| { + BuiltinView { name: "pg_class", schema: PG_CATALOG_SCHEMA, oid: oid::VIEW_PG_CLASS_OID, @@ -6858,6 +6869,7 @@ pub static PG_CLASS: LazyLock = LazyLock::new(|| BuiltinView { .with_column("relhasindex", ScalarType::Bool.nullable(false)) .with_column("relpersistence", ScalarType::PgLegacyChar.nullable(false)) .with_column("relkind", ScalarType::String.nullable(true)) + .with_column("relnatts", ScalarType::Int16.nullable(false)) .with_column("relchecks", ScalarType::Int16.nullable(false)) .with_column("relhasrules", ScalarType::Bool.nullable(false)) .with_column("relhastriggers", ScalarType::Bool.nullable(false)) @@ -6876,12 +6888,13 @@ pub static PG_CLASS: LazyLock = LazyLock::new(|| BuiltinView { sql: " SELECT oid, relname, relnamespace, reloftype, relowner, relam, reltablespace, reltuples, reltoastrelid, - relhasindex, relpersistence, relkind, relchecks, relhasrules, relhastriggers, relhassubclass, + relhasindex, relpersistence, relkind, relnatts, relchecks, relhasrules, relhastriggers, relhassubclass, relrowsecurity, relforcerowsecurity, relreplident, relispartition, relhasoids, reloptions FROM mz_internal.pg_class_all_databases WHERE database_name IS NULL OR database_name = pg_catalog.current_database(); ", access: vec![PUBLIC_SELECT], +} }); pub static PG_DEPEND: LazyLock = LazyLock::new(|| BuiltinView { @@ -6992,6 +7005,7 @@ pub static PG_INDEX: LazyLock = LazyLock::new(|| { desc: RelationDesc::builder() .with_column("indexrelid", ScalarType::Oid.nullable(false)) .with_column("indrelid", ScalarType::Oid.nullable(false)) + .with_column("indnatts", ScalarType::Int16.nullable(false)) .with_column("indisunique", ScalarType::Bool.nullable(false)) .with_column("indisprimary", ScalarType::Bool.nullable(false)) .with_column("indimmediate", 
ScalarType::Bool.nullable(false)) @@ -7008,6 +7022,15 @@ pub static PG_INDEX: LazyLock = LazyLock::new(|| { sql: "SELECT mz_indexes.oid AS indexrelid, mz_relations.oid AS indrelid, + COALESCE( + ( + SELECT count(*)::pg_catalog.int2 + FROM mz_catalog.mz_columns + JOIN mz_catalog.mz_relations mri ON mz_columns.id = mri.id + WHERE mri.oid = mz_catalog.mz_relations.oid + ), + 0::pg_catalog.int2 + ) AS indnatts, -- MZ doesn't support creating unique indexes so indisunique is filled with false false::pg_catalog.bool AS indisunique, false::pg_catalog.bool AS indisprimary, @@ -13370,21 +13393,21 @@ pub const MZ_SYSTEM_ROLE: BuiltinRole = BuiltinRole { id: MZ_SYSTEM_ROLE_ID, name: SYSTEM_USER_NAME, oid: oid::ROLE_MZ_SYSTEM_OID, - attributes: RoleAttributes::new().with_all(), + attributes: RoleAttributesRaw::new().with_all(), }; pub const MZ_SUPPORT_ROLE: BuiltinRole = BuiltinRole { id: MZ_SUPPORT_ROLE_ID, name: SUPPORT_USER_NAME, oid: oid::ROLE_MZ_SUPPORT_OID, - attributes: RoleAttributes::new(), + attributes: RoleAttributesRaw::new(), }; pub const MZ_ANALYTICS_ROLE: BuiltinRole = BuiltinRole { id: MZ_ANALYTICS_ROLE_ID, name: ANALYTICS_USER_NAME, oid: oid::ROLE_MZ_ANALYTICS_OID, - attributes: RoleAttributes::new(), + attributes: RoleAttributesRaw::new(), }; /// This role can `SELECT` from various query history objects, @@ -13393,7 +13416,7 @@ pub const MZ_MONITOR_ROLE: BuiltinRole = BuiltinRole { id: MZ_MONITOR_ROLE_ID, name: "mz_monitor", oid: oid::ROLE_MZ_MONITOR_OID, - attributes: RoleAttributes::new(), + attributes: RoleAttributesRaw::new(), }; /// This role is like [`MZ_MONITOR_ROLE`], but can only query @@ -13402,7 +13425,7 @@ pub const MZ_MONITOR_REDACTED: BuiltinRole = BuiltinRole { id: MZ_MONITOR_REDACTED_ROLE_ID, name: "mz_monitor_redacted", oid: oid::ROLE_MZ_MONITOR_REDACTED_OID, - attributes: RoleAttributes::new(), + attributes: RoleAttributesRaw::new(), }; pub const MZ_SYSTEM_CLUSTER: BuiltinCluster = BuiltinCluster { diff --git 
a/src/catalog/src/durable/initialize.rs b/src/catalog/src/durable/initialize.rs index 7e8f54f79cf7c..d97e48098268a 100644 --- a/src/catalog/src/durable/initialize.rs +++ b/src/catalog/src/durable/initialize.rs @@ -31,7 +31,7 @@ use mz_repr::adt::mz_acl_item::{AclMode, MzAclItem}; use mz_repr::network_policy_id::NetworkPolicyId; use mz_repr::role_id::RoleId; use mz_sql::catalog::{ - DefaultPrivilegeAclItem, DefaultPrivilegeObject, ObjectType, RoleAttributes, RoleMembership, + DefaultPrivilegeAclItem, DefaultPrivilegeObject, ObjectType, RoleAttributesRaw, RoleMembership, RoleVars, SystemObjectType, }; use mz_sql::names::{ @@ -290,7 +290,7 @@ pub(crate) async fn initialize( tx.insert_builtin_role( RoleId::Public, PUBLIC_ROLE_NAME.as_str().to_lowercase(), - RoleAttributes::new(), + RoleAttributesRaw::new(), RoleMembership::new(), RoleVars::default(), ROLE_PUBLIC_OID, @@ -298,7 +298,7 @@ pub(crate) async fn initialize( // If provided, generate a new Id for the bootstrap role. let bootstrap_role = if let Some(role) = &options.bootstrap_role { - let attributes = RoleAttributes::new(); + let attributes = RoleAttributesRaw::new(); let membership = RoleMembership::new(); let vars = RoleVars::default(); @@ -322,7 +322,7 @@ pub(crate) async fn initialize( Some(Role { id, name: role.to_string(), - attributes, + attributes: attributes.into(), membership, vars, oid, diff --git a/src/catalog/src/durable/transaction.rs b/src/catalog/src/durable/transaction.rs index 7bc3192acb49f..8a517d0993b18 100644 --- a/src/catalog/src/durable/transaction.rs +++ b/src/catalog/src/durable/transaction.rs @@ -30,14 +30,15 @@ use mz_repr::network_policy_id::NetworkPolicyId; use mz_repr::role_id::RoleId; use mz_repr::{CatalogItemId, Diff, GlobalId, RelationVersion}; use mz_sql::catalog::{ - CatalogError as SqlCatalogError, CatalogItemType, ObjectType, RoleAttributes, RoleMembership, - RoleVars, + CatalogError as SqlCatalogError, CatalogItemType, ObjectType, PasswordAction, + RoleAttributesRaw, 
RoleMembership, RoleVars, }; use mz_sql::names::{CommentObjectId, DatabaseId, ResolvedDatabaseSpecifier, SchemaId}; use mz_sql::plan::NetworkPolicyRule; use mz_sql_parser::ast::QualifiedReplica; use mz_storage_client::controller::StorageTxn; use mz_storage_types::controller::StorageError; +use tracing::warn; use crate::builtin::BuiltinLog; use crate::durable::initialize::{ @@ -320,7 +321,7 @@ impl<'a> Transaction<'a> { &mut self, id: RoleId, name: String, - attributes: RoleAttributes, + attributes: RoleAttributesRaw, membership: RoleMembership, vars: RoleVars, oid: u32, @@ -333,7 +334,7 @@ impl<'a> Transaction<'a> { pub fn insert_user_role( &mut self, name: String, - attributes: RoleAttributes, + attributes: RoleAttributesRaw, membership: RoleMembership, vars: RoleVars, temporary_oids: &HashSet, @@ -349,7 +350,7 @@ impl<'a> Transaction<'a> { &mut self, id: RoleId, name: String, - attributes: RoleAttributes, + attributes: RoleAttributesRaw, membership: RoleMembership, vars: RoleVars, oid: u32, @@ -376,7 +377,7 @@ impl<'a> Transaction<'a> { RoleKey { id }, RoleValue { name: name.clone(), - attributes, + attributes: attributes.into(), membership, vars, oid, @@ -1474,35 +1475,43 @@ impl<'a> Transaction<'a> { /// Runtime is linear with respect to the total number of items in the catalog. /// DO NOT call this function in a loop, implement and use some `Self::update_roles` instead. /// You should model it after [`Self::update_items`]. 
- pub fn update_role(&mut self, id: RoleId, role: Role) -> Result<(), CatalogError> { + pub fn update_role( + &mut self, + id: RoleId, + role: Role, + password: PasswordAction, + ) -> Result<(), CatalogError> { let key = RoleKey { id }; if self.roles.get(&key).is_some() { let auth_key = RoleAuthKey { role_id: id }; - if let Some(ref password) = role.attributes.password { - let hash = - mz_auth::hash::scram256_hash(password).expect("password hash should be valid"); - let value = RoleAuthValue { - password_hash: Some(hash), - updated_at: SYSTEM_TIME(), - }; + match password { + PasswordAction::Set(new_password) => { + let hash = mz_auth::hash::scram256_hash(&new_password) + .expect("password hash should be valid"); + let value = RoleAuthValue { + password_hash: Some(hash), + updated_at: SYSTEM_TIME(), + }; - if self.role_auth.get(&auth_key).is_some() { - self.role_auth - .update_by_key(auth_key.clone(), value, self.op_id)?; - } else { - self.role_auth.insert(auth_key.clone(), value, self.op_id)?; + if self.role_auth.get(&auth_key).is_some() { + self.role_auth + .update_by_key(auth_key.clone(), value, self.op_id)?; + } else { + self.role_auth.insert(auth_key.clone(), value, self.op_id)?; + } } - } else if self.role_auth.get(&auth_key).is_some() { - // If the role is being updated to not have a password, we need to - // remove the password hash from the role_auth catalog. 
- let value = RoleAuthValue { - password_hash: None, - updated_at: SYSTEM_TIME(), - }; - - self.role_auth - .update_by_key(auth_key.clone(), value, self.op_id)?; + PasswordAction::Clear => { + let value = RoleAuthValue { + password_hash: None, + updated_at: SYSTEM_TIME(), + }; + if self.role_auth.get(&auth_key).is_some() { + self.role_auth + .update_by_key(auth_key.clone(), value, self.op_id)?; + } + } + PasswordAction::NoChange => {} } self.roles diff --git a/src/catalog/src/durable/upgrade.rs b/src/catalog/src/durable/upgrade.rs index 4d02c7c2317eb..5243b726e8a40 100644 --- a/src/catalog/src/durable/upgrade.rs +++ b/src/catalog/src/durable/upgrade.rs @@ -207,6 +207,7 @@ mod v70_to_v71; mod v71_to_v72; mod v72_to_v73; mod v73_to_v74; +mod v74_mz_system; /// Describes a single action to take during a migration from `V1` to `V2`. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -264,7 +265,10 @@ pub(crate) async fn upgrade( .await? .expect("initialized catalog must have a version"); // Run migrations until we're up-to-date. - while version < CATALOG_VERSION { + // In 0.147 a bug was introduced that necessitates + // a migration of v74 -> v74 to add login to mz_system roles + // this < is changed to a <= to pick that migration up. + while version <= CATALOG_VERSION { (version, commit_ts) = run_upgrade(persist_handle, version, commit_ts).await?; } @@ -352,9 +356,16 @@ async fn run_upgrade( ) .await } - + 74 => { + run_versioned_upgrade( + unopened_catalog_state, + version, + commit_ts, + v74_mz_system::upgrade, + ) + .await + } // Up-to-date, no migration needed! - CATALOG_VERSION => Ok((CATALOG_VERSION, commit_ts)), FUTURE_VERSION.. => Err(incompatible), } } diff --git a/src/catalog/src/durable/upgrade/v74_mz_system.rs b/src/catalog/src/durable/upgrade/v74_mz_system.rs new file mode 100644 index 0000000000000..f4a5f3c7a4fa7 --- /dev/null +++ b/src/catalog/src/durable/upgrade/v74_mz_system.rs @@ -0,0 +1,163 @@ +// Copyright Materialize, Inc.
and contributors. All rights reserved. +// +// Use of this software is governed by the Business Source License +// included in the LICENSE file. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0. + +use crate::durable::traits::UpgradeFrom; +use crate::durable::upgrade::MigrationAction; +use crate::durable::upgrade::objects_v74 as v74; + +/// This upgrade doesn't change any protos, simply retroactively marks mz_system as login +pub fn upgrade( + snapshot: Vec, +) -> Vec> { + let mut migrations = Vec::new(); + for update in snapshot { + match update.kind { + Some(v74::state_update_kind::Kind::Role(old_role)) => { + let new_role = v74::state_update_kind::Role::upgrade_from(old_role.clone()); + let old_role = v74::StateUpdateKind { + kind: Some(v74::state_update_kind::Kind::Role(old_role)), + }; + let new_role = v74::StateUpdateKind { + kind: Some(v74::state_update_kind::Kind::Role(new_role)), + }; + let migration = MigrationAction::Update(old_role, new_role); + migrations.push(migration); + } + _ => {} + } + } + migrations +} + +impl UpgradeFrom for v74::state_update_kind::Role { + fn upgrade_from(value: v74::state_update_kind::Role) -> Self { + let new_key = value.key.map(|key| v74::RoleKey { + id: key.id.map(v74::RoleId::upgrade_from), + }); + + let is_mz_system = value + .value + .as_ref() + .map_or(false, |v| v.name == "mz_system"); + + let mut new_value = value.value.map(|value| v74::RoleValue { + name: value.name, + oid: value.oid, + attributes: value.attributes.map(v74::RoleAttributes::upgrade_from), + membership: value.membership.map(v74::RoleMembership::upgrade_from), + vars: value.vars.map(v74::RoleVars::upgrade_from), + }); + + if is_mz_system { + if let Some(ref mut value) = new_value { + if let Some(ref mut attrs) = value.attributes { + attrs.login = Some(true); + } + } + } + + v74::state_update_kind::Role { + key: new_key, 
+ value: new_value, + } + } +} + +impl UpgradeFrom for v74::RoleVars { + fn upgrade_from(value: v74::RoleVars) -> Self { + v74::RoleVars { + entries: value + .entries + .iter() + .map(|val| v74::role_vars::Entry::upgrade_from(val.clone())) + .collect(), + } + } +} + +impl UpgradeFrom for v74::RoleMembership { + fn upgrade_from(value: v74::RoleMembership) -> Self { + v74::RoleMembership { + map: value + .map + .iter() + .map(|val| v74::role_membership::Entry::upgrade_from(*val)) + .collect(), + } + } +} + +impl UpgradeFrom for v74::role_membership::Entry { + fn upgrade_from(value: v74::role_membership::Entry) -> Self { + v74::role_membership::Entry { + key: value.key.map(v74::RoleId::upgrade_from), + value: value.value.map(v74::RoleId::upgrade_from), + } + } +} + +impl UpgradeFrom for v74::role_vars::Entry { + fn upgrade_from(value: v74::role_vars::Entry) -> Self { + v74::role_vars::Entry { + key: value.key, + val: value.val.map(v74::role_vars::entry::Val::upgrade_from), + } + } +} + +impl UpgradeFrom for v74::role_vars::entry::Val { + fn upgrade_from(value: v74::role_vars::entry::Val) -> Self { + match value { + v74::role_vars::entry::Val::Flat(x) => v74::role_vars::entry::Val::Flat(x), + v74::role_vars::entry::Val::SqlSet(x) => { + v74::role_vars::entry::Val::SqlSet(v74::role_vars::SqlSet::upgrade_from(x)) + } + } + } +} + +impl UpgradeFrom for v74::role_vars::SqlSet { + fn upgrade_from(value: v74::role_vars::SqlSet) -> Self { + v74::role_vars::SqlSet { + entries: value.entries, + } + } +} + +impl UpgradeFrom for v74::RoleAttributes { + fn upgrade_from(value: v74::RoleAttributes) -> Self { + v74::RoleAttributes { + inherit: value.inherit, + ..Default::default() + } + } +} + +impl UpgradeFrom for v74::RoleId { + fn upgrade_from(value: v74::RoleId) -> Self { + let value = match value.value { + Some(v74::role_id::Value::System(x)) => Some(v74::role_id::Value::System(x)), + Some(v74::role_id::Value::User(x)) => Some(v74::role_id::Value::User(x)), + 
Some(v74::role_id::Value::Public(_)) => { + Some(v74::role_id::Value::Public(v74::Empty {})) + } + Some(v74::role_id::Value::Predefined(x)) => Some(v74::role_id::Value::Predefined(x)), + None => None, + }; + v74::RoleId { value } + } +} + +impl UpgradeFrom for v74::RoleKey { + fn upgrade_from(value: v74::RoleKey) -> Self { + Self { + id: value.id.map(v74::RoleId::upgrade_from), + } + } +} diff --git a/src/catalog/tests/open.rs b/src/catalog/tests/open.rs index 14622211954c2..81a545de7121d 100644 --- a/src/catalog/tests/open.rs +++ b/src/catalog/tests/open.rs @@ -27,7 +27,7 @@ use mz_persist_client::{PersistClient, PersistLocation}; use mz_persist_types::ShardId; use mz_proto::RustType; use mz_repr::role_id::RoleId; -use mz_sql::catalog::{RoleAttributes, RoleMembership, RoleVars}; +use mz_sql::catalog::{RoleAttributesRaw, RoleMembership, RoleVars}; use uuid::Uuid; /// A new type for [`Snapshot`] that excludes fields that change often from the debug output. It's @@ -449,7 +449,7 @@ async fn test_open_read_only(state_builder: TestCatalogStateBuilder) { let (role_id, _) = txn .insert_user_role( "joe".to_string(), - RoleAttributes::new(), + RoleAttributesRaw::new(), RoleMembership::new(), RoleVars::default(), &HashSet::new(), diff --git a/src/catalog/tests/read-write.rs b/src/catalog/tests/read-write.rs index d35e4c653872f..964f52df216be 100644 --- a/src/catalog/tests/read-write.rs +++ b/src/catalog/tests/read-write.rs @@ -25,7 +25,7 @@ use mz_persist_client::PersistClient; use mz_proto::RustType; use mz_repr::role_id::RoleId; use mz_repr::{CatalogItemId, GlobalId}; -use mz_sql::catalog::{RoleAttributes, RoleMembership, RoleVars}; +use mz_sql::catalog::{RoleAttributesRaw, RoleMembership, RoleVars}; use mz_sql::names::{DatabaseId, ResolvedDatabaseSpecifier, SchemaId}; #[mz_ore::test(tokio::test)] @@ -389,7 +389,7 @@ async fn test_non_writer_commits(state_builder: TestCatalogStateBuilder) { let (role_id, _) = txn .insert_user_role( role_name.to_string(), - 
RoleAttributes::new(), + RoleAttributesRaw::new(), RoleMembership::new(), RoleVars::default(), &HashSet::new(), diff --git a/src/cloud-provider/src/lib.rs b/src/cloud-provider/src/lib.rs index d9f889a611a3a..f8c764bee7ac9 100644 --- a/src/cloud-provider/src/lib.rs +++ b/src/cloud-provider/src/lib.rs @@ -11,7 +11,7 @@ use std::fmt; use std::str::FromStr; /// Identifies a supported cloud provider. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum CloudProvider { /// A pseudo-provider value used by local development environments. Local, diff --git a/src/cloud-resources/src/crd/materialize.rs b/src/cloud-resources/src/crd/materialize.rs index e32b34df0c07b..93977335efe80 100644 --- a/src/cloud-resources/src/crd/materialize.rs +++ b/src/cloud-resources/src/crd/materialize.rs @@ -82,8 +82,11 @@ pub mod v1alpha1 { pub environmentd_extra_args: Option>, // Extra environment variables to pass to the environmentd binary pub environmentd_extra_env: Option>, + // DEPRECATED // If running in AWS, override the IAM role to use to give - // environmentd access to the persist S3 bucket + // environmentd access to the persist S3 bucket. + // DEPRECATED + // Use `service_account_annotations` to set "eks.amazonaws.com/role-arn" instead. pub environmentd_iam_role_arn: Option, // If running in AWS, override the IAM role to use to support // the CREATE CONNECTION feature @@ -97,6 +100,23 @@ pub mod v1alpha1 { // Resource requirements for the console pod pub console_resource_requirements: Option, + // Name of the kubernetes service account to use. + // If not set, we will create one with the same name as this Materialize object. + pub service_account_name: Option, + // Annotations to apply to the service account + // + // Annotations on service accounts are commonly used by cloud providers for IAM. + // AWS uses "eks.amazonaws.com/role-arn". 
+ // Azure uses "azure.workload.identity/client-id", but + // additionally requires "azure.workload.identity/use": "true" on the pods. + pub service_account_annotations: Option>, + // Labels to apply to the service account + pub service_account_labels: Option>, + // Annotations to apply to the pods + pub pod_annotations: Option>, + // Labels to apply to the pods + pub pod_labels: Option>, + // When changes are made to the environmentd resources (either via // modifying fields in the spec here or by deploying a new // orchestratord version which changes how resources are generated), @@ -177,8 +197,15 @@ pub mod v1alpha1 { self.meta().namespace.clone().unwrap() } + pub fn create_service_account(&self) -> bool { + self.spec.service_account_name.is_none() + } + pub fn service_account_name(&self) -> String { - self.name_unchecked() + self.spec + .service_account_name + .clone() + .unwrap_or_else(|| self.name_unchecked()) } pub fn role_name(&self) -> String { diff --git a/src/cluster-client/Cargo.toml b/src/cluster-client/Cargo.toml index f28c16b48a6e0..b1259f7692d77 100644 --- a/src/cluster-client/Cargo.toml +++ b/src/cluster-client/Cargo.toml @@ -23,6 +23,7 @@ serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.127" tokio-stream = "0.1.17" tonic = "0.12.1" +uuid = "1.17.0" workspace-hack = { version = "0.0.0", path = "../workspace-hack", optional = true } [build-dependencies] diff --git a/src/cluster-client/src/client.proto b/src/cluster-client/src/client.proto index 6652283ddc0e0..800c05c05ae87 100644 --- a/src/cluster-client/src/client.proto +++ b/src/cluster-client/src/client.proto @@ -13,11 +13,6 @@ syntax = "proto3"; package mz_cluster_client.client; -message ProtoClusterStartupEpoch { - int64 envd = 1; - uint64 replica = 2; -} - message ProtoTimelyConfig { uint64 workers = 1; uint64 process = 2; diff --git a/src/cluster-client/src/client.rs b/src/cluster-client/src/client.rs index d47b9759beba9..6f1aab9f020c8 100644 --- 
a/src/cluster-client/src/client.rs +++ b/src/cluster-client/src/client.rs @@ -9,134 +9,15 @@ //! Types for commands to clusters. -use std::num::NonZeroI64; use std::str::FromStr; use mz_proto::{ProtoType, RustType, TryFromProtoError}; -use proptest::prelude::{Arbitrary, any}; -use proptest::strategy::{BoxedStrategy, Strategy}; use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; +use uuid::Uuid; include!(concat!(env!("OUT_DIR"), "/mz_cluster_client.client.rs")); -/// A value generated by environmentd and passed to the clusterd processes -/// to help them disambiguate different `CreateTimely` commands. -/// -/// The semantics of this value are not important, except that they -/// must be totally ordered, and any value (for a given replica) must -/// be greater than any that were generated before (for that replica). -/// This is the reason for having two -/// components (one from the catalog storage that increases on every environmentd restart, -/// another in-memory and local to the current incarnation of environmentd) -#[derive(PartialEq, Eq, Debug, Copy, Clone, Serialize, Deserialize)] -pub struct ClusterStartupEpoch { - /// The environment incarnation. - envd: NonZeroI64, - /// The replica incarnation. - replica: u64, -} - -impl ClusterStartupEpoch { - /// Increases the replica incarnation counter. 
- pub fn bump_replica(&mut self) { - self.replica += 1; - } -} - -impl RustType for ClusterStartupEpoch { - fn into_proto(&self) -> ProtoClusterStartupEpoch { - let Self { envd, replica } = self; - ProtoClusterStartupEpoch { - envd: envd.get(), - replica: *replica, - } - } - - fn from_proto(proto: ProtoClusterStartupEpoch) -> Result { - let ProtoClusterStartupEpoch { envd, replica } = proto; - Ok(Self { - envd: envd.try_into().unwrap(), - replica, - }) - } -} - -impl Arbitrary for ClusterStartupEpoch { - type Strategy = BoxedStrategy; - type Parameters = (); - - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - (any::(), any::()) - .prop_map(|(envd, replica)| ClusterStartupEpoch { - envd: NonZeroI64::new(if envd == 0 { envd + 1 } else { envd }).unwrap(), - replica, - }) - .boxed() - } -} - -impl ClusterStartupEpoch { - /// Construct a new cluster startup epoch, from the environment epoch and replica incarnation. - pub fn new(envd: NonZeroI64, replica: u64) -> Self { - Self { envd, replica } - } - - /// Serialize for transfer over the network - pub fn to_bytes(&self) -> [u8; 16] { - let mut ret = [0; 16]; - let mut p = &mut ret[..]; - use std::io::Write; - p.write_all(&self.envd.get().to_be_bytes()[..]).unwrap(); - p.write_all(&self.replica.to_be_bytes()[..]).unwrap(); - ret - } - - /// Inverse of `to_bytes` - pub fn from_bytes(bytes: [u8; 16]) -> Self { - let envd = i64::from_be_bytes((&bytes[0..8]).try_into().unwrap()); - let replica = u64::from_be_bytes((&bytes[8..16]).try_into().unwrap()); - Self { - envd: envd.try_into().unwrap(), - replica, - } - } - - /// The environment epoch. - pub fn envd(&self) -> NonZeroI64 { - self.envd - } - - /// The replica incarnation. 
- pub fn replica(&self) -> u64 { - self.replica - } -} - -impl std::fmt::Display for ClusterStartupEpoch { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { envd, replica } = self; - write!(f, "({envd}, {replica})") - } -} - -impl PartialOrd for ClusterStartupEpoch { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for ClusterStartupEpoch { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - let Self { envd, replica } = self; - let Self { - envd: other_envd, - replica: other_replica, - } = other; - (envd, replica).cmp(&(other_envd, other_replica)) - } -} - /// Configuration of the cluster we will spin up #[derive(Arbitrary, Clone, Debug, Default, PartialEq, Serialize, Deserialize)] pub struct TimelyConfig { @@ -218,9 +99,9 @@ impl TimelyConfig { /// A trait for specific cluster commands that can be unpacked into /// `CreateTimely` variants. pub trait TryIntoTimelyConfig { - /// Attempt to unpack `self` into a `(TimelyConfig, ClusterStartupEpoch)`. Otherwise, + /// Attempt to unpack `self` into a `(TimelyConfig, Uuid)`. Otherwise, /// fail and return `self` back. 
- fn try_into_timely_config(self) -> Result<(TimelyConfig, ClusterStartupEpoch), Self> + fn try_into_timely_config(self) -> Result<(TimelyConfig, Uuid), Self> where Self: Sized; } @@ -245,7 +126,7 @@ pub struct ClusterReplicaLocation { mod tests { use mz_ore::assert_ok; use mz_proto::protobuf_roundtrip; - use proptest::prelude::ProptestConfig; + use proptest::prelude::{ProptestConfig, any}; use proptest::proptest; use super::*; @@ -260,13 +141,5 @@ mod tests { assert_ok!(actual); assert_eq!(actual.unwrap(), expect); } - - #[mz_ore::test] - #[cfg_attr(miri, ignore)] // slow - fn cluster_startup_epoch_protobuf_roundtrip(expect in any::() ) { - let actual = protobuf_roundtrip::<_, ProtoClusterStartupEpoch>(&expect); - assert_ok!(actual); - assert_eq!(actual.unwrap(), expect); - } } } diff --git a/src/cluster/Cargo.toml b/src/cluster/Cargo.toml index abd31731d8866..2b95ed7818eef 100644 --- a/src/cluster/Cargo.toml +++ b/src/cluster/Cargo.toml @@ -24,6 +24,7 @@ regex = "1.11.1" timely = "0.21.0" tokio = { version = "1.44.1", features = ["fs", "rt", "sync", "net"] } tracing = "0.1.37" +uuid = "1.17.0" workspace-hack = { version = "0.0.0", path = "../workspace-hack", optional = true } [dev-dependencies] diff --git a/src/cluster/src/client.rs b/src/cluster/src/client.rs index d98196b8f95ac..4b9d44be6333f 100644 --- a/src/cluster/src/client.rs +++ b/src/cluster/src/client.rs @@ -32,6 +32,7 @@ use timely::worker::Worker as TimelyWorker; use tokio::runtime::Handle; use tokio::sync::mpsc; use tracing::{info, warn}; +use uuid::Uuid; use crate::communication::initialize_networking; @@ -59,6 +60,7 @@ pub struct TimelyContainer { /// Channels over which to send endpoints for wiring up a new Client client_txs: Vec< crossbeam_channel::Sender<( + Uuid, crossbeam_channel::Receiver, mpsc::UnboundedSender, )>, @@ -105,7 +107,7 @@ where } } - async fn build(&mut self, config: TimelyConfig) -> Result<(), Error> { + async fn build(&mut self, config: TimelyConfig, nonce: Uuid) -> Result<(), 
Error> { let workers = config.workers; // Check if we can reuse the existing timely instance. @@ -145,7 +147,7 @@ where let (resp_tx, resp_rx) = mpsc::unbounded_channel(); client_tx - .send((cmd_rx, resp_tx)) + .send((nonce, cmd_rx, resp_tx)) .expect("worker not dropped"); command_txs.push(cmd_tx); @@ -183,7 +185,7 @@ where // Changing this debug statement requires changing the replica-isolation test tracing::debug!("ClusterClient send={:?}", &cmd); match cmd.try_into_timely_config() { - Ok((config, _epoch)) => self.build(config).await, + Ok((config, nonce)) => self.build(config, nonce).await, Err(cmd) => self.inner.as_mut().expect("initialized").send(cmd).await, } } @@ -219,6 +221,7 @@ pub trait ClusterSpec: Clone + Send + Sync + 'static { &self, timely_worker: &mut TimelyWorker, client_rx: crossbeam_channel::Receiver<( + Uuid, crossbeam_channel::Receiver, mpsc::UnboundedSender, )>, diff --git a/src/clusterd/BUILD.bazel b/src/clusterd/BUILD.bazel index ef3179f4f3a75..145b4a02d2dba 100644 --- a/src/clusterd/BUILD.bazel +++ b/src/clusterd/BUILD.bazel @@ -33,7 +33,7 @@ rust_library( proc_macro_deps = [] + all_crate_deps(proc_macro = True), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/alloc:mz_alloc", "//src/alloc-default:mz_alloc_default", @@ -84,7 +84,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/alloc:mz_alloc", "//src/alloc-default:mz_alloc_default", @@ -163,7 +163,7 @@ rust_binary( "@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_clusterd", "//src/alloc:mz_alloc", diff --git a/src/clusterd/Cargo.toml b/src/clusterd/Cargo.toml index c79dfca90c336..a4607e0e34f3d 100644 --- a/src/clusterd/Cargo.toml +++ b/src/clusterd/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-clusterd" description = "Materialize's cluster 
server." -version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/compute-client/src/controller.rs b/src/compute-client/src/controller.rs index f158ae56bdd79..236e6a7c54bc6 100644 --- a/src/compute-client/src/controller.rs +++ b/src/compute-client/src/controller.rs @@ -29,7 +29,6 @@ //! recover each dataflow to its current state in case of failure or other reconfiguration. use std::collections::{BTreeMap, BTreeSet}; -use std::num::NonZeroI64; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -204,8 +203,6 @@ pub struct ComputeController { peek_stash_persist_location: PersistLocation, /// A controller response to be returned on the next call to [`ComputeController::process`]. stashed_response: Option>, - /// A number that increases on every `environmentd` restart. - envd_epoch: NonZeroI64, /// The compute controller metrics. metrics: ComputeControllerMetrics, /// A function that produces the current wallclock time. 
@@ -241,7 +238,6 @@ impl ComputeController { pub fn new( build_info: &'static BuildInfo, storage_collections: StorageCollections, - envd_epoch: NonZeroI64, read_only: bool, metrics_registry: &MetricsRegistry, peek_stash_persist_location: PersistLocation, @@ -309,7 +305,6 @@ impl ComputeController { config: Default::default(), peek_stash_persist_location, stashed_response: None, - envd_epoch, metrics, now, wallclock_lag, @@ -499,7 +494,6 @@ impl ComputeController { config: _, peek_stash_persist_location: _, stashed_response, - envd_epoch, metrics: _, now: _, wallclock_lag: _, @@ -539,7 +533,6 @@ impl ComputeController { field("initialized", initialized)?, field("read_only", read_only)?, field("stashed_response", format!("{stashed_response:?}"))?, - field("envd_epoch", envd_epoch)?, field("maintenance_scheduled", maintenance_scheduled)?, ]); Ok(serde_json::Value::Object(map)) @@ -577,7 +570,6 @@ where Arc::clone(&self.storage_collections), self.peek_stash_persist_location.clone(), logs, - self.envd_epoch, self.metrics.for_instance(id), self.now.clone(), self.wallclock_lag.clone(), diff --git a/src/compute-client/src/controller/instance.rs b/src/compute-client/src/controller/instance.rs index 85cfd8463065c..f6652332ce517 100644 --- a/src/compute-client/src/controller/instance.rs +++ b/src/compute-client/src/controller/instance.rs @@ -11,14 +11,12 @@ use std::collections::{BTreeMap, BTreeSet}; use std::fmt::Debug; -use std::num::NonZeroI64; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use chrono::{DateTime, DurationRound, TimeDelta, Utc}; use mz_build_info::BuildInfo; use mz_cluster_client::WallclockLagFn; -use mz_cluster_client::client::ClusterStartupEpoch; use mz_compute_types::ComputeInstanceId; use mz_compute_types::dataflows::{BuildDesc, DataflowDescription}; use mz_compute_types::plan::LirId; @@ -165,7 +163,6 @@ where storage: StorageCollections, peek_stash_persist_location: PersistLocation, arranged_logs: Vec<(LogVariant, GlobalId, 
SharedCollectionState)>, - envd_epoch: NonZeroI64, metrics: InstanceMetrics, now: NowFn, wallclock_lag: WallclockLagFn, @@ -193,7 +190,6 @@ where storage, peek_stash_persist_location, arranged_logs, - envd_epoch, metrics, now, wallclock_lag, @@ -286,8 +282,6 @@ pub(super) struct Instance { response_tx: mpsc::UnboundedSender>, /// Sender for introspection updates to be recorded. introspection_tx: mpsc::UnboundedSender, - /// A number that increases with each restart of `environmentd`. - envd_epoch: NonZeroI64, /// Numbers that increase with each restart of a replica. replica_epochs: BTreeMap, /// The registry the controller uses to report metrics. @@ -426,7 +420,7 @@ impl Instance { id: ReplicaId, client: ReplicaClient, config: ReplicaConfig, - epoch: ClusterStartupEpoch, + epoch: u64, ) { let log_ids: BTreeSet<_> = config.logging.index_logs.values().copied().collect(); @@ -959,7 +953,6 @@ impl Instance { command_rx: _, response_tx: _, introspection_tx: _, - envd_epoch, replica_epochs, metrics: _, dyncfg: _, @@ -1011,7 +1004,6 @@ impl Instance { field("peeks", peeks)?, field("subscribes", subscribes)?, field("copy_tos", copy_tos)?, - field("envd_epoch", envd_epoch)?, field("replica_epochs", replica_epochs)?, field("wallclock_lag_last_recorded", wallclock_lag_last_recorded)?, ]); @@ -1029,7 +1021,6 @@ where storage: StorageCollections, peek_stash_persist_location: PersistLocation, arranged_logs: Vec<(LogVariant, GlobalId, SharedCollectionState)>, - envd_epoch: NonZeroI64, metrics: InstanceMetrics, now: NowFn, wallclock_lag: WallclockLagFn, @@ -1077,7 +1068,6 @@ where command_rx, response_tx, introspection_tx, - envd_epoch, replica_epochs: Default::default(), metrics, dyncfg, @@ -1093,7 +1083,7 @@ where async fn run(mut self) { self.send(ComputeCommand::CreateTimely { config: Default::default(), - epoch: ClusterStartupEpoch::new(self.envd_epoch, 0), + nonce: Uuid::default(), }); let instance_config = InstanceConfig { @@ -1227,8 +1217,9 @@ where let replica_epoch = 
self.replica_epochs.entry(id).or_default(); *replica_epoch += 1; + let epoch = *replica_epoch; + let metrics = self.metrics.for_replica(id); - let epoch = ClusterStartupEpoch::new(self.envd_epoch, *replica_epoch); let client = ReplicaClient::spawn( id, self.build_info, @@ -1941,19 +1932,19 @@ where } /// Handles a response from a replica. Replica IDs are re-used across replica restarts, so we - /// use the replica incarnation to drop stale responses. - fn handle_response(&mut self, (replica_id, incarnation, response): ReplicaResponse) { + /// use the replica epoch to drop stale responses. + fn handle_response(&mut self, (replica_id, epoch, response): ReplicaResponse) { // Filter responses from non-existing or stale replicas. if self .replicas .get(&replica_id) - .filter(|replica| replica.epoch.replica() == incarnation) + .filter(|replica| replica.epoch == epoch) .is_none() { return; } - // Invariant: the replica exists and has the expected incarnation. + // Invariant: the replica exists and has the expected epoch. match response { ComputeResponse::Frontiers(id, frontiers) => { @@ -2839,7 +2830,7 @@ struct ReplicaState { /// Per-replica collection state. collections: BTreeMap>, /// The epoch of the replica. 
- epoch: ClusterStartupEpoch, + epoch: u64, } impl ReplicaState { @@ -2849,7 +2840,7 @@ impl ReplicaState { config: ReplicaConfig, metrics: ReplicaMetrics, introspection_tx: mpsc::UnboundedSender, - epoch: ClusterStartupEpoch, + epoch: u64, ) -> Self { Self { id, diff --git a/src/compute-client/src/controller/replica.rs b/src/compute-client/src/controller/replica.rs index 33dd557f8a95f..5158291499eea 100644 --- a/src/compute-client/src/controller/replica.rs +++ b/src/compute-client/src/controller/replica.rs @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use anyhow::bail; use mz_build_info::BuildInfo; -use mz_cluster_client::client::{ClusterReplicaLocation, ClusterStartupEpoch, TimelyConfig}; +use mz_cluster_client::client::{ClusterReplicaLocation, TimelyConfig}; use mz_compute_types::dyncfgs::ENABLE_COMPUTE_REPLICA_EXPIRATION; use mz_dyncfg::ConfigSet; use mz_ore::channel::InstrumentedUnboundedSender; @@ -27,6 +27,7 @@ use tokio::select; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}; use tracing::{debug, info, trace, warn}; +use uuid::Uuid; use crate::controller::instance::ReplicaResponse; use crate::controller::sequential_hydration::SequentialHydration; @@ -78,7 +79,7 @@ where id: ReplicaId, build_info: &'static BuildInfo, config: ReplicaConfig, - epoch: ClusterStartupEpoch, + epoch: u64, metrics: ReplicaMetrics, dyncfg: Arc, response_tx: InstrumentedUnboundedSender, IntCounter>, @@ -149,9 +150,9 @@ struct ReplicaTask { command_rx: UnboundedReceiver>, /// A channel upon which responses from the replica are delivered. response_tx: InstrumentedUnboundedSender, IntCounter>, - /// A number (technically, pair of numbers) identifying this incarnation of the replica. + /// A number identifying this incarnation of the replica. /// The semantics of this don't matter, except that it must strictly increase. - epoch: ClusterStartupEpoch, + epoch: u64, /// Replica metrics. 
metrics: ReplicaMetrics, /// Flag to report successful replica connection. @@ -238,8 +239,6 @@ where T: ComputeControllerTimestamp, ComputeGrpcClient: ComputeClient, { - let id = self.replica_id; - let incarnation = self.epoch.replica(); loop { select! { // Command from controller to forward to replica. @@ -261,7 +260,7 @@ where self.observe_response(&response); - if self.response_tx.send((id, incarnation, response)).is_err() { + if self.response_tx.send((self.replica_id, self.epoch, response)).is_err() { // Controller is no longer interested in this replica. Shut down. break; } @@ -278,7 +277,7 @@ where /// contain replica-specific fields that must be adjusted before sending. fn specialize_command(&self, command: &mut ComputeCommand) { match command { - ComputeCommand::CreateTimely { config, epoch } => { + ComputeCommand::CreateTimely { config, nonce } => { **config = TimelyConfig { workers: self.config.location.workers, process: 0, @@ -290,7 +289,7 @@ where enable_zero_copy_lgalloc: self.config.enable_zero_copy_lgalloc, zero_copy_limit: self.config.zero_copy_limit, }; - *epoch = self.epoch; + *nonce = Uuid::new_v4(); } ComputeCommand::CreateInstance(config) => { config.logging = self.config.logging.clone(); diff --git a/src/compute-client/src/protocol/command.proto b/src/compute-client/src/protocol/command.proto index 253f9042588ae..df277e20675a7 100644 --- a/src/compute-client/src/protocol/command.proto +++ b/src/compute-client/src/protocol/command.proto @@ -32,7 +32,7 @@ import "tracing/src/params.proto"; message ProtoComputeCommand { message ProtoCreateTimely { mz_cluster_client.client.ProtoTimelyConfig config = 1; - mz_cluster_client.client.ProtoClusterStartupEpoch epoch = 2; + mz_proto.ProtoU128 nonce = 2; } oneof kind { diff --git a/src/compute-client/src/protocol/command.rs b/src/compute-client/src/protocol/command.rs index 956f7a03b5d3c..877ffb37ec145 100644 --- a/src/compute-client/src/protocol/command.rs +++ b/src/compute-client/src/protocol/command.rs 
@@ -12,7 +12,7 @@ use std::str::FromStr; use std::time::Duration; -use mz_cluster_client::client::{ClusterStartupEpoch, TimelyConfig, TryIntoTimelyConfig}; +use mz_cluster_client::client::{TimelyConfig, TryIntoTimelyConfig}; use mz_compute_types::dataflows::DataflowDescription; use mz_compute_types::plan::render_plan::RenderPlan; use mz_dyncfg::ConfigUpdates; @@ -59,16 +59,16 @@ pub enum ComputeCommand { /// distribution requires the timely dataflow runtime to be initialized, which is why the /// `CreateTimely` command exists. /// - /// The `epoch` value imposes an ordering on iterations of the compute protocol. When the - /// compute controller connects to a replica, it must send an `epoch` that is greater than all - /// epochs it sent to the same replica on previous connections. Multi-process replicas should - /// use the `epoch` to ensure that their individual processes agree on which protocol iteration + /// The `nonce` value allows identifying different iterations of the compute protocol. When the + /// compute controller connects to a replica, it must send a `nonce` that is different from all + /// nonces it sent to the same replica on previous connections. Multi-process replicas should + /// use the `nonce` to ensure that their individual processes agree on which protocol iteration /// they are in. CreateTimely { - /// TODO(database-issues#7533): Add documentation. + /// The Timely runtime configuration. config: Box, - /// TODO(database-issues#7533): Add documentation. - epoch: ClusterStartupEpoch, + /// A nonce unique to the current iteration of the compute protocol. 
+ nonce: Uuid, }, /// `CreateInstance` must be sent after `CreateTimely` to complete the [Creation Stage] of the @@ -280,9 +280,9 @@ impl RustType for ComputeCommand { use proto_compute_command::*; ProtoComputeCommand { kind: Some(match self { - ComputeCommand::CreateTimely { config, epoch } => CreateTimely(ProtoCreateTimely { + ComputeCommand::CreateTimely { config, nonce } => CreateTimely(ProtoCreateTimely { config: Some(*config.into_proto()), - epoch: Some(epoch.into_proto()), + nonce: Some(nonce.into_proto()), }), ComputeCommand::CreateInstance(config) => CreateInstance(*config.into_proto()), ComputeCommand::InitializationComplete => InitializationComplete(()), @@ -308,10 +308,10 @@ impl RustType for ComputeCommand { use proto_compute_command::Kind::*; use proto_compute_command::*; match proto.kind { - Some(CreateTimely(ProtoCreateTimely { config, epoch })) => { + Some(CreateTimely(ProtoCreateTimely { config, nonce })) => { let config = Box::new(config.into_rust_if_some("ProtoCreateTimely::config")?); - let epoch = epoch.into_rust_if_some("ProtoCreateTimely::epoch")?; - Ok(ComputeCommand::CreateTimely { config, epoch }) + let nonce = nonce.into_rust_if_some("ProtoCreateTimely::nonce")?; + Ok(ComputeCommand::CreateTimely { config, nonce }) } Some(CreateInstance(config)) => { let config = Box::new(config.into_rust()?); @@ -701,9 +701,9 @@ fn empty_otel_ctx() -> impl Strategy { } impl TryIntoTimelyConfig for ComputeCommand { - fn try_into_timely_config(self) -> Result<(TimelyConfig, ClusterStartupEpoch), Self> { + fn try_into_timely_config(self) -> Result<(TimelyConfig, Uuid), Self> { match self { - ComputeCommand::CreateTimely { config, epoch } => Ok((*config, epoch)), + ComputeCommand::CreateTimely { config, nonce } => Ok((*config, nonce)), cmd => Err(cmd), } } diff --git a/src/compute-client/src/service.rs b/src/compute-client/src/service.rs index 606ff26a18f8c..9f50595c26c73 100644 --- a/src/compute-client/src/service.rs +++ b/src/compute-client/src/service.rs 
@@ -235,7 +235,7 @@ where // * Forward `CreateTimely` and `UpdateConfiguration` commands to all shards. // * Forward all other commands to the first shard only. match command { - ComputeCommand::CreateTimely { config, epoch } => { + ComputeCommand::CreateTimely { config, nonce } => { let timely_cmds = config.split_command(self.parts); timely_cmds @@ -243,7 +243,7 @@ where .map(|config| { Some(ComputeCommand::CreateTimely { config: Box::new(config), - epoch, + nonce, }) }) .collect() diff --git a/src/compute/src/command_channel.rs b/src/compute/src/command_channel.rs index 3505858e99974..371d6d778a389 100644 --- a/src/compute/src/command_channel.rs +++ b/src/compute/src/command_channel.rs @@ -18,7 +18,7 @@ //! broadcasts them to other workers through the Timely fabric, taking care of the correct //! sequencing. //! -//! Commands in the command channel are tagged with an epoch identifying the incarnation of the +//! Commands in the command channel are tagged with a nonce identifying the incarnation of the //! compute protocol the command belongs to, allowing workers to recognize client reconnects that //! require a reconciliation. @@ -34,16 +34,17 @@ use timely::dataflow::operators::Operator; use timely::dataflow::operators::generic::source; use timely::scheduling::{Scheduler, SyncActivator}; use timely::worker::Worker as TimelyWorker; +use uuid::Uuid; /// A sender pushing commands onto the command channel. pub struct Sender { - tx: crossbeam_channel::Sender<(ComputeCommand, u64)>, + tx: crossbeam_channel::Sender<(ComputeCommand, Uuid)>, activator: Arc>>, } impl Sender { /// Broadcasts the given command to all workers. - pub fn send(&self, message: (ComputeCommand, u64)) { + pub fn send(&self, message: (ComputeCommand, Uuid)) { if self.tx.send(message).is_err() { unreachable!("command channel never shuts down"); } @@ -58,7 +59,7 @@ impl Sender { /// A receiver reading commands from the command channel. 
pub struct Receiver { - rx: crossbeam_channel::Receiver<(ComputeCommand, u64)>, + rx: crossbeam_channel::Receiver<(ComputeCommand, Uuid)>, } impl Receiver { @@ -66,7 +67,7 @@ impl Receiver { /// /// This returns `None` when there are currently no commands but there might be commands again /// in the future. - pub fn try_recv(&self) -> Option<(ComputeCommand, u64)> { + pub fn try_recv(&self) -> Option<(ComputeCommand, Uuid)> { match self.rx.try_recv() { Ok(msg) => Some(msg), Err(TryRecvError::Empty) => None, @@ -104,7 +105,7 @@ pub fn render(timely_worker: &mut TimelyWorker) -> (Sender, Rece let Some(cap) = &mut capability else { // Non-leader workers will still receive `UpdateConfiguration` commands and // we must drain those to not leak memory. - while let Ok((cmd, _epoch)) = input_rx.try_recv() { + while let Ok((cmd, _nonce)) = input_rx.try_recv() { assert_ne!(worker_id, 0); assert!(matches!(cmd, ComputeCommand::UpdateConfiguration(_))); } @@ -114,9 +115,9 @@ pub fn render(timely_worker: &mut TimelyWorker) -> (Sender, Rece assert_eq!(worker_id, 0); let input: Vec<_> = input_rx.try_iter().collect(); - for (cmd, epoch) in input { + for (cmd, nonce) in input { let worker_cmds = - split_command(cmd, peers).map(|(idx, cmd)| (idx, cmd, epoch)); + split_command(cmd, peers).map(|(idx, cmd)| (idx, cmd, nonce)); output.session(&cap).give_iterator(worker_cmds); cap.downgrade(&(cap.time() + 1)); @@ -128,8 +129,8 @@ pub fn render(timely_worker: &mut TimelyWorker) -> (Sender, Rece "command_channel::sink", move |input| { while let Some((_cap, data)) = input.next() { - for (_idx, cmd, epoch) in data.drain(..) { - let _ = output_tx.send((cmd, epoch)); + for (_idx, cmd, nonce) in data.drain(..) { + let _ = output_tx.send((cmd, nonce)); } } }, diff --git a/src/compute/src/server.rs b/src/compute/src/server.rs index 22f6a0be442cd..028d5c5d6f983 100644 --- a/src/compute/src/server.rs +++ b/src/compute/src/server.rs @@ -10,7 +10,6 @@ //! An interactive dataflow server. 
use std::cell::RefCell; -use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet}; use std::convert::Infallible; use std::fmt::Debug; @@ -39,6 +38,7 @@ use timely::progress::Antichain; use timely::worker::Worker as TimelyWorker; use tokio::sync::mpsc; use tracing::{info, trace, warn}; +use uuid::Uuid; use crate::command_channel; use crate::compute_state::{ActiveComputeState, ComputeState, ReportedFrontier}; @@ -112,22 +112,22 @@ pub async fn serve( Ok(client_builder) } -/// Error type returned on connection epoch changes. +/// Error type returned on connection nonce changes. /// -/// An epoch change informs workers that subsequent commands come a from a new client connection +/// A nonce change informs workers that subsequent commands come from a new client connection /// and therefore require reconciliation. -struct EpochChange(u64); +struct NonceChange(Uuid); /// Endpoint used by workers to receive compute commands. /// -/// Observes epoch changes in the command stream and converts them into receive errors. +/// Observes nonce changes in the command stream and converts them into receive errors. struct CommandReceiver { /// The channel supplying commands. inner: command_channel::Receiver, /// The ID of the Timely worker. worker_id: usize, - /// The epoch identifying the current cluster protocol incarnation. - epoch: Option, + /// The nonce identifying the current cluster protocol incarnation. + nonce: Option, /// A stash to enable peeking the next command, used in `try_recv`. stashed_command: Option, } @@ -137,76 +137,69 @@ impl CommandReceiver { Self { inner, worker_id, - epoch: None, + nonce: None, stashed_command: None, } } /// Receive the next pending command, if any. /// - /// If the next command is at a different epoch, this method instead returns an `Err` - /// containing the new epoch.
- fn try_recv(&mut self) -> Result, EpochChange> { + /// If the next command has a different nonce, this method instead returns an `Err` + /// containing the new nonce. + fn try_recv(&mut self) -> Result, NonceChange> { if let Some(command) = self.stashed_command.take() { return Ok(Some(command)); } - let Some((command, epoch)) = self.inner.try_recv() else { + let Some((command, nonce)) = self.inner.try_recv() else { return Ok(None); }; - trace!(worker = self.worker_id, %epoch, ?command, "received command"); + trace!(worker = self.worker_id, %nonce, ?command, "received command"); - match self.epoch.cmp(&Some(epoch)) { - Ordering::Less => { - self.epoch = Some(epoch); - self.stashed_command = Some(command); - Err(EpochChange(epoch)) - } - Ordering::Equal => Ok(Some(command)), - Ordering::Greater => panic!("epoch regression: {epoch} < {}", self.epoch.unwrap()), + if Some(nonce) == self.nonce { + Ok(Some(command)) + } else { + self.nonce = Some(nonce); + self.stashed_command = Some(command); + Err(NonceChange(nonce)) } } } /// Endpoint used by workers to send sending compute responses. /// -/// Tags responses with the current epoch, allowing receivers to filter out responses intended for +/// Tags responses with the current nonce, allowing receivers to filter out responses intended for /// previous client connections. pub(crate) struct ResponseSender { /// The channel consuming responses. - inner: crossbeam_channel::Sender<(ComputeResponse, u64)>, + inner: crossbeam_channel::Sender<(ComputeResponse, Uuid)>, /// The ID of the Timely worker. worker_id: usize, - /// The epoch identifying the current cluster protocol incarnation. - epoch: Option, + /// The nonce identifying the current cluster protocol incarnation. 
+ nonce: Option, } impl ResponseSender { - fn new(inner: crossbeam_channel::Sender<(ComputeResponse, u64)>, worker_id: usize) -> Self { + fn new(inner: crossbeam_channel::Sender<(ComputeResponse, Uuid)>, worker_id: usize) -> Self { Self { inner, worker_id, - epoch: None, + nonce: None, } } - /// Advance to the given epoch. - fn advance_epoch(&mut self, epoch: u64) { - assert!( - Some(epoch) > self.epoch, - "epoch regression: {epoch} <= {}", - self.epoch.unwrap(), - ); - self.epoch = Some(epoch); + /// Set the cluster protocol nonce. + fn set_nonce(&mut self, nonce: Uuid) { + self.nonce = Some(nonce); } /// Send a compute response. pub fn send(&self, response: ComputeResponse) -> Result<(), SendError> { - let epoch = self.epoch.expect("epoch must be initialized"); + let nonce = self.nonce.expect("nonce must be initialized"); - trace!(worker = self.worker_id, %epoch, ?response, "sending response"); + trace!(worker = self.worker_id, %nonce, ?response, "sending response"); self.inner - .send((response, epoch)) + .send((response, nonce)) .map_err(|SendError((resp, _))| SendError(resp)) } } @@ -243,6 +236,7 @@ impl ClusterSpec for Config { &self, timely_worker: &mut TimelyWorker, client_rx: crossbeam_channel::Receiver<( + Uuid, crossbeam_channel::Receiver, mpsc::UnboundedSender, )>, @@ -325,23 +319,23 @@ impl<'w, A: Allocate + 'static> Worker<'w, A> { /// Runs a compute worker. pub fn run(&mut self) { - // The command receiver is initialized without an epoch, so receiving the first command - // always triggers an epoch change. - let EpochChange(epoch) = self.recv_command().expect_err("change to first epoch"); - self.advance_epoch(epoch); + // The command receiver is initialized without a nonce, so receiving the first command + // always triggers a nonce change.
+ let NonceChange(nonce) = self.recv_command().expect_err("change to first nonce"); + self.set_nonce(nonce); loop { - let Err(EpochChange(epoch)) = self.run_client(); - self.advance_epoch(epoch); + let Err(NonceChange(nonce)) = self.run_client(); + self.set_nonce(nonce); } } - fn advance_epoch(&mut self, epoch: u64) { - self.response_tx.advance_epoch(epoch); + fn set_nonce(&mut self, nonce: Uuid) { + self.response_tx.set_nonce(nonce); } - /// Handles commands for a client connection, returns when the epoch changes. - fn run_client(&mut self) -> Result { + /// Handles commands for a client connection, returns when the nonce changes. + fn run_client(&mut self) -> Result { self.reconcile()?; // The last time we did periodic maintenance. @@ -397,7 +391,7 @@ impl<'w, A: Allocate + 'static> Worker<'w, A> { } } - fn handle_pending_commands(&mut self) -> Result<(), EpochChange> { + fn handle_pending_commands(&mut self) -> Result<(), NonceChange> { while let Some(cmd) = self.command_rx.try_recv()? { self.handle_command(cmd); } @@ -436,7 +430,7 @@ impl<'w, A: Allocate + 'static> Worker<'w, A> { /// /// This method blocks if no command is currently available, but takes care to step the Timely /// worker while doing so. - fn recv_command(&mut self) -> Result { + fn recv_command(&mut self) -> Result { loop { if let Some(cmd) = self.command_rx.try_recv()? { return Ok(cmd); @@ -468,7 +462,7 @@ impl<'w, A: Allocate + 'static> Worker<'w, A> { /// Some additional tidying happens, cleaning up pending peeks, reported frontiers, and creating a new /// subscribe response buffer. We will need to be vigilant with future modifications to `ComputeState` to /// line up changes there with clean resets here. - fn reconcile(&mut self) -> Result<(), EpochChange> { + fn reconcile(&mut self) -> Result<(), NonceChange> { // To initialize the connection, we want to drain all commands until we receive a // `ComputeCommand::InitializationComplete` command to form a target command state. 
let mut new_commands = Vec::new(); @@ -751,11 +745,12 @@ impl<'w, A: Allocate + 'static> Worker<'w, A> { /// while the [`ClusterClient`] provides a new pair of channels on each reconnect. fn spawn_channel_adapter( client_rx: crossbeam_channel::Receiver<( + Uuid, crossbeam_channel::Receiver, mpsc::UnboundedSender, )>, command_tx: command_channel::Sender, - response_rx: crossbeam_channel::Receiver<(ComputeResponse, u64)>, + response_rx: crossbeam_channel::Receiver<(ComputeResponse, Uuid)>, worker_id: usize, ) { thread::Builder::new() @@ -764,26 +759,36 @@ fn spawn_channel_adapter( .name(format!("cca-{worker_id}")) .spawn(move || { // To make workers aware of the individual client connections, we tag forwarded - // commands with an epoch that increases on every new client connection. Additionally, - // we use the epoch to filter out responses with a different epoch, which were intended - // for previous clients. - let mut epoch = 0; - - // It's possible that we receive responses with epochs from the future: Worker 0 might - // have increased its epoch before us and broadcasted it to our Timely cluster. When we - // receive a response with a future epoch, we need to wait with forwarding it until we - // have increased our own epoch sufficiently (by observing new client connections). We - // need to stash the response in the meantime. - let mut stashed_response = None; - - while let Ok((command_rx, response_tx)) = client_rx.recv() { - epoch += 1; + // commands with the client nonce. Additionally, we use the nonce to filter out + // responses with a different nonce, which are intended for different client + // connections. + // + // It's possible that we receive responses with nonces from the past but also from the + // future: Worker 0 might have received a new nonce before us and broadcasted it to our + // Timely cluster. 
When we receive a response with a future nonce, we need to wait with + // forwarding it until we have received the same nonce from a client connection. + // + // Nonces are not ordered so we don't know whether a response nonce is from the past or + // the future. We thus assume that every response with an unknown nonce might be from + // the future and stash them all. Every time we reconnect, we immediately send all + // stashed responses with a matching nonce. Every time we receive a new response with a + // nonce that matches our current one, we can discard the entire response stash as we + // know that all stashed responses must be from the past. + let mut stashed_responses = BTreeMap::>::new(); + + while let Ok((nonce, command_rx, response_tx)) = client_rx.recv() { + // Send stashed responses for this client. + if let Some(resps) = stashed_responses.remove(&nonce) { + for resp in resps { + let _ = response_tx.send(resp); + } + } // Wait for a new response while forwarding received commands. let serve_rx_channels = || loop { crossbeam_channel::select! { recv(command_rx) -> msg => match msg { - Ok(cmd) => command_tx.send((cmd, epoch)), + Ok(cmd) => command_tx.send((cmd, nonce)), Err(_) => return Err(()), }, recv(response_rx) -> msg => { @@ -794,26 +799,20 @@ fn spawn_channel_adapter( // Serve this connection until we see any of the channels disconnect. loop { - let (resp, resp_epoch) = match stashed_response.take() { - Some(stashed) => stashed, - None => match serve_rx_channels() { - Ok(response) => response, - Err(()) => break, - }, + let Ok((resp, resp_nonce)) = serve_rx_channels() else { + break; }; - if resp_epoch < epoch { - // Response for a previous connection; discard it. - continue; - } else if resp_epoch > epoch { - // Response for a future connection; stash it and reconnect. - stashed_response = Some((resp, resp_epoch)); - break; - } else { + if resp_nonce == nonce { // Response for the current connection; forward it. 
+ stashed_responses.clear(); if response_tx.send(resp).is_err() { break; } + } else { + // Response for a past or future connection; stash it. + let stash = stashed_responses.entry(resp_nonce).or_default(); + stash.push(resp); } } } diff --git a/src/controller/src/clusters.rs b/src/controller/src/clusters.rs index c8d4a3af85472..ff8782b418702 100644 --- a/src/controller/src/clusters.rs +++ b/src/controller/src/clusters.rs @@ -398,6 +398,8 @@ where &mut self, cluster_id: ClusterId, replica_id: ReplicaId, + cluster_name: String, + replica_name: String, role: ClusterRole, config: ReplicaConfig, enable_worker_core_affinity: bool, @@ -432,6 +434,8 @@ where let (service, metrics_task_join_handle) = self.provision_replica( cluster_id, replica_id, + cluster_name, + replica_name, role, m, enable_worker_core_affinity, @@ -618,6 +622,8 @@ where &self, cluster_id: ClusterId, replica_id: ReplicaId, + cluster_name: String, + replica_name: String, role: ClusterRole, location: ManagedReplicaLocation, enable_worker_core_affinity: bool, @@ -770,6 +776,10 @@ where ("workers".into(), location.allocation.workers.to_string()), ("size".into(), location.size.to_string()), ]), + annotations: BTreeMap::from([ + ("replica-name".into(), replica_name), + ("cluster-name".into(), cluster_name), + ]), availability_zones: match location.availability_zones { ManagedReplicaAvailabilityZones::FromCluster(azs) => azs, ManagedReplicaAvailabilityZones::FromReplica(az) => az.map(|z| vec![z]), diff --git a/src/controller/src/lib.rs b/src/controller/src/lib.rs index 921d6be169677..17aab8ddb67f1 100644 --- a/src/controller/src/lib.rs +++ b/src/controller/src/lib.rs @@ -685,7 +685,6 @@ where config.now.clone(), wallclock_lag_fn.clone(), Arc::clone(&txns_metrics), - envd_epoch, read_only, &config.metrics_registry, controller_metrics.clone(), @@ -699,7 +698,6 @@ where let compute_controller = ComputeController::new( config.build_info, storage_collections, - envd_epoch, read_only, &config.metrics_registry, 
config.persist_location, diff --git a/src/environmentd/BUILD.bazel b/src/environmentd/BUILD.bazel index 28eb1c6eca685..bbaa3003aa7b7 100644 --- a/src/environmentd/BUILD.bazel +++ b/src/environmentd/BUILD.bazel @@ -44,7 +44,7 @@ rust_library( proc_macro_deps = [] + all_crate_deps(proc_macro = True), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_environmentd_build_script", "//src/adapter:mz_adapter", @@ -120,7 +120,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -254,7 +254,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -325,7 +325,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -396,7 +396,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -467,7 +467,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -538,7 +538,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -609,7 +609,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -680,7 +680,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ 
"//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -751,7 +751,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/adapter-types:mz_adapter_types", @@ -819,7 +819,7 @@ rust_binary( "@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_environmentd", "//src/adapter:mz_adapter", diff --git a/src/environmentd/Cargo.toml b/src/environmentd/Cargo.toml index 4cc7074890a36..0281cf535ec03 100644 --- a/src/environmentd/Cargo.toml +++ b/src/environmentd/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-environmentd" description = "Manages a single Materialize environment." -version = "0.147.0-dev.0" +version = "0.147.13" authors = ["Materialize, Inc."] license = "proprietary" edition.workspace = true diff --git a/src/environmentd/src/environmentd/main.rs b/src/environmentd/src/environmentd/main.rs index 3f871b39bac95..767f681a7a4e5 100644 --- a/src/environmentd/src/environmentd/main.rs +++ b/src/environmentd/src/environmentd/main.rs @@ -182,6 +182,10 @@ pub struct Args { /// Name of a non-default Kubernetes scheduler, if any. #[structopt(long, env = "ORCHESTRATOR_KUBERNETES_SCHEDULER_NAME")] orchestrator_kubernetes_scheduler_name: Option, + /// Annotations to apply to all services created by the Kubernetes orchestrator + /// in the form `KEY=VALUE`. + #[structopt(long, env = "ORCHESTRATOR_KUBERNETES_SERVICE_ANNOTATION")] + orchestrator_kubernetes_service_annotation: Vec>, /// Labels to apply to all services created by the Kubernetes orchestrator /// in the form `KEY=VALUE`. #[structopt(long, env = "ORCHESTRATOR_KUBERNETES_SERVICE_LABEL")] @@ -417,6 +421,10 @@ pub struct Args { /// which the event was sent. 
#[clap(long, env = "SEGMENT_CLIENT_SIDE")] segment_client_side: bool, + /// Only create a dummy segment client when no segment api key is provided, only to get more + /// testing coverage. + #[clap(long, env = "TEST_ONLY_DUMMY_SEGMENT_CLIENT")] + test_only_dummy_segment_client: bool, /// An SDK key for LaunchDarkly. /// /// Setting this in combination with [`Self::config_sync_loop_interval`] @@ -809,6 +817,11 @@ fn run(mut args: Args) -> Result<(), anyhow::Error> { .block_on(KubernetesOrchestrator::new(KubernetesOrchestratorConfig { context: args.orchestrator_kubernetes_context.clone(), scheduler_name: args.orchestrator_kubernetes_scheduler_name, + service_annotations: args + .orchestrator_kubernetes_service_annotation + .into_iter() + .map(|l| (l.key, l.value)) + .collect(), service_labels: args .orchestrator_kubernetes_service_label .into_iter() @@ -1103,6 +1116,7 @@ fn run(mut args: Args) -> Result<(), anyhow::Error> { timestamp_oracle_url, segment_api_key: args.segment_api_key, segment_client_side: args.segment_client_side, + test_only_dummy_segment_client: args.test_only_dummy_segment_client, launchdarkly_sdk_key: args.launchdarkly_sdk_key, launchdarkly_key_map: args .launchdarkly_key_map diff --git a/src/environmentd/src/http.rs b/src/environmentd/src/http.rs index f4f55d8bce7c6..a947857c085d1 100644 --- a/src/environmentd/src/http.rs +++ b/src/environmentd/src/http.rs @@ -36,7 +36,8 @@ use futures::future::{Shared, TryFutureExt}; use headers::authorization::{Authorization, Basic, Bearer}; use headers::{HeaderMapExt, HeaderName}; use http::header::{AUTHORIZATION, CONTENT_TYPE}; -use http::{Method, StatusCode}; +use http::uri::Scheme; +use http::{HeaderMap, HeaderValue, Method, StatusCode, Uri}; use hyper_openssl::SslStream; use hyper_openssl::client::legacy::MaybeHttpsStream; use hyper_util::rt::TokioIo; @@ -100,6 +101,8 @@ pub const MAX_REQUEST_SIZE: usize = u64_to_usize(5 * bytesize::MIB); const SESSION_DURATION: Duration = Duration::from_secs(3600); // 1 
hour +const PROFILING_API_ENDPOINTS: &[&str] = &["/memory", "/hierarchical-memory", "/prof/"]; + #[derive(Debug)] pub struct HttpConfig { pub source: &'static str, @@ -685,37 +688,42 @@ where #[derive(Debug, Error)] enum AuthError { - #[error("HTTPS is required")] - HttpsRequired, - #[error("invalid username in client certificate")] - InvalidLogin(String), + #[error("role dissallowed")] + RoleDisallowed(String), #[error("{0}")] Frontegg(#[from] FronteggError), #[error("missing authorization header")] - MissingHttpAuthentication, + MissingHttpAuthentication { + include_www_authenticate_header: bool, + }, #[error("{0}")] MismatchedUser(String), #[error("session expired")] SessionExpired, #[error("failed to update session")] FailedToUpdateSession, + #[error("invalid credentials")] + InvalidCredentials, } impl IntoResponse for AuthError { fn into_response(self) -> Response { warn!("HTTP request failed authentication: {}", self); + let mut headers = HeaderMap::new(); + match self { + AuthError::MissingHttpAuthentication { + include_www_authenticate_header, + } if include_www_authenticate_header => { + headers.insert( + http::header::WWW_AUTHENTICATE, + HeaderValue::from_static("Basic realm=Materialize"), + ); + } + _ => {} + }; // We omit most detail from the error message we send to the client, to // avoid giving attackers unnecessary information. - let message = match self { - AuthError::HttpsRequired => self.to_string(), - _ => "unauthorized".into(), - }; - ( - StatusCode::UNAUTHORIZED, - [(http::header::WWW_AUTHENTICATE, "Basic realm=Materialize")], - message, - ) - .into_response() + (StatusCode::UNAUTHORIZED, headers, "unauthorized").into_response() } } @@ -806,7 +814,16 @@ async fn http_auth( match (tls_enabled, &conn_protocol) { (false, ConnProtocol::Http) => {} (false, ConnProtocol::Https { .. 
}) => unreachable!(), - (true, ConnProtocol::Http) => return Err(AuthError::HttpsRequired), + (true, ConnProtocol::Http) => { + let mut parts = req.uri().clone().into_parts(); + parts.scheme = Some(Scheme::HTTPS); + return Ok(Redirect::permanent( + &Uri::from_parts(parts) + .expect("it was already a URI, just changed the scheme") + .to_string(), + ) + .into_response()); + } (true, ConnProtocol::Https { .. }) => {} } // If we've already passed some other auth, just use that. @@ -826,7 +843,18 @@ async fn http_auth( None }; - let user = auth(&authenticator, creds, allowed_roles).await?; + let path = req.uri().path(); + let include_www_authenticate_header = path == "/" + || PROFILING_API_ENDPOINTS + .iter() + .any(|prefix| path.starts_with(prefix)); + let user = auth( + &authenticator, + creds, + allowed_roles, + include_www_authenticate_header, + ) + .await?; // Add the authenticated user as an extension so downstream handlers can // inspect it if necessary. @@ -898,7 +926,7 @@ async fn init_ws( anyhow::bail!("expected auth information"); } }; - let user = auth(&authenticator, Some(creds), *allowed_roles).await?; + let user = auth(&authenticator, Some(creds), *allowed_roles, false).await?; (user, options) }; @@ -931,6 +959,7 @@ async fn auth( authenticator: &Authenticator, creds: Option, allowed_roles: AllowedRoles, + include_www_authenticate_header: bool, ) -> Result { // TODO pass session data here? 
let (name, external_metadata_rx) = match authenticator { @@ -949,13 +978,25 @@ async fn auth( }); (claims.user, Some(external_metadata_rx)) } - None => return Err(AuthError::MissingHttpAuthentication), + None => { + return Err(AuthError::MissingHttpAuthentication { + include_www_authenticate_header, + }); + } + }, + Authenticator::Password(adapter_client) => match creds { + Some(Credentials::Password { username, password }) => { + if let Err(_) = adapter_client.authenticate(&username, &password).await { + return Err(AuthError::InvalidCredentials); + } + (username, None) + } + _ => { + return Err(AuthError::MissingHttpAuthentication { + include_www_authenticate_header, + }); + } }, - Authenticator::Password(_) => { - warn!("self hosted auth only, but somehow missing session data"); - // TODO tell the user to login here? - return Err(AuthError::MissingHttpAuthentication); - } Authenticator::None => { // If no authentication, use whatever is in the HTTP auth // header (without checking the password), or fall back to the @@ -989,7 +1030,7 @@ fn check_role_allowed(name: &str, allowed_roles: AllowedRoles) -> Result<(), Aut if role_allowed { Ok(()) } else { - Err(AuthError::InvalidLogin(name.to_owned())) + Err(AuthError::RoleDisallowed(name.to_owned())) } } diff --git a/src/environmentd/src/lib.rs b/src/environmentd/src/lib.rs index 578a50309dc21..a57afb63e0bb6 100644 --- a/src/environmentd/src/lib.rs +++ b/src/environmentd/src/lib.rs @@ -151,6 +151,8 @@ pub struct Config { /// Whether the Segment client is being used on the client side /// (rather than the server side). pub segment_client_side: bool, + /// Only create a dummy segment client, only to get more testing coverage. + pub test_only_dummy_segment_client: bool, /// An SDK key for LaunchDarkly. Enables system parameter synchronization /// with LaunchDarkly. 
pub launchdarkly_sdk_key: Option, @@ -868,6 +870,21 @@ impl Listeners { segment_client, adapter_client: adapter_client.clone(), environment_id: config.environment_id, + report_interval: Duration::from_secs(3600), + }); + } else if config.test_only_dummy_segment_client { + // We only have access to a segment client in production but we + // still want to exercise the telemetry reporting code to a degree. + // So we create a dummy client and report telemetry into the void. + // This way we at least run the telemetry queries the way a + // production environment would. + tracing::debug!("starting telemetry reporting with a dummy segment client"); + let segment_client = mz_segment::Client::new_dummy_client(); + telemetry::start_reporting(telemetry::Config { + segment_client, + adapter_client: adapter_client.clone(), + environment_id: config.environment_id, + report_interval: Duration::from_secs(180), }); } diff --git a/src/environmentd/src/telemetry.rs b/src/environmentd/src/telemetry.rs index def84b3ef0e3f..5781ad38a8ecc 100644 --- a/src/environmentd/src/telemetry.rs +++ b/src/environmentd/src/telemetry.rs @@ -81,16 +81,13 @@ use anyhow::bail; use futures::StreamExt; use mz_adapter::PeekResponseUnary; use mz_adapter::telemetry::{EventDetails, SegmentClientExt}; +use mz_ore::collections::CollectionExt; use mz_ore::retry::Retry; -use mz_ore::{assert_none, task}; +use mz_ore::{soft_panic_or_log, task}; use mz_repr::adt::jsonb::Jsonb; use mz_sql::catalog::EnvironmentId; use serde_json::json; use tokio::time::{self, Duration}; -use tracing::warn; - -/// How frequently to send a summary to Segment. -const REPORT_INTERVAL: Duration = Duration::from_secs(3600); /// Telemetry configuration. #[derive(Clone)] @@ -101,6 +98,8 @@ pub struct Config { pub adapter_client: mz_adapter::Client, /// The ID of the environment for which to report data. pub environment_id: EnvironmentId, + /// How frequently to send a summary to Segment. 
+ pub report_interval: Duration, } /// Starts reporting telemetry events to Segment. @@ -113,6 +112,7 @@ async fn report_loop( segment_client, adapter_client, environment_id, + report_interval, }: Config, ) { struct Stats { @@ -125,7 +125,7 @@ async fn report_loop( let mut last_stats: Option = None; - let mut interval = time::interval(REPORT_INTERVAL); + let mut interval = time::interval(report_interval); loop { interval.tick().await; @@ -170,21 +170,25 @@ async fn report_loop( )", )).await?; - // The introspection query returns only one row, and that should - // easily fit into our max_result_size and not go through the - // peek stash, which would lead to a result that streams back - // multiple batches of rows. - let rows = rows_stream.next().await.expect("expected at least one result batch"); - assert_none!(rows_stream.next().await, "expected at most one result batch"); + let mut row_iters = Vec::new(); - let mut rows = match rows { - PeekResponseUnary::Rows(rows) => rows, - PeekResponseUnary::Canceled => bail!("query canceled"), - PeekResponseUnary::Error(e) => bail!(e), - }; + while let Some(rows) = rows_stream.next().await { + match rows { + PeekResponseUnary::Rows(rows) => row_iters.push(rows), + PeekResponseUnary::Canceled => bail!("query canceled"), + PeekResponseUnary::Error(e) => bail!(e), + } + } - let row = rows.next().expect("expected at least one row").to_owned(); - assert_none!(rows.next(), "introspection query had more than one row?"); + let mut rows = Vec::new(); + for mut row_iter in row_iters { + while let Some(row) = row_iter.next() { + rows.push(row.to_owned()); + } + } + + assert_eq!(1, rows.len(), "expected one row but got: {:?}", rows); + let row = rows.into_first(); let jsonb = Jsonb::from_row(row); Ok::<_, anyhow::Error>(jsonb.as_ref().to_serde_json()) @@ -194,11 +198,13 @@ async fn report_loop( let traits = match traits { Ok(traits) => traits, Err(e) => { - warn!("unable to collect telemetry traits: {e}"); + soft_panic_or_log!("unable 
to collect telemetry traits: {e}"); continue; } }; + tracing::info!(?traits, "telemetry traits"); + segment_client.group( // We use the organization ID as the user ID for events // that are not associated with a particular user. diff --git a/src/environmentd/src/test_util.rs b/src/environmentd/src/test_util.rs index 2e68a01d16c4d..e39756d6c6e44 100644 --- a/src/environmentd/src/test_util.rs +++ b/src/environmentd/src/test_util.rs @@ -728,6 +728,7 @@ impl Listeners { storage_usage_retention_period: config.storage_usage_retention_period, segment_api_key: None, segment_client_side: false, + test_only_dummy_segment_client: false, egress_addresses: vec![], aws_account_id: None, aws_privatelink_availability_zones: None, diff --git a/src/expr/src/relation.proto b/src/expr/src/relation.proto index 8ce2acb5e28c9..956467a69ab58 100644 --- a/src/expr/src/relation.proto +++ b/src/expr/src/relation.proto @@ -182,6 +182,10 @@ message ProtoTableFunc { mz_repr.relation_and_scalar.ProtoRelationType relation = 2; } + message ProtoWithOrdinality { + ProtoTableFunc inner = 1; + } + oneof kind { bool jsonb_each = 1; google.protobuf.Empty jsonb_object_keys = 2; @@ -202,5 +206,6 @@ message ProtoTableFunc { google.protobuf.Empty mz_acl_explode = 17; mz_repr.relation_and_scalar.ProtoScalarType unnest_map = 18; google.protobuf.Empty regexp_matches = 19; + ProtoWithOrdinality with_ordinality = 21; } } diff --git a/src/expr/src/relation.rs b/src/expr/src/relation.rs index fbed24241b9df..e3c575dcea1ff 100644 --- a/src/expr/src/relation.rs +++ b/src/expr/src/relation.rs @@ -1228,9 +1228,7 @@ impl MirRelationExpr { } Project { outputs, .. } => outputs.len(), Map { scalars, .. } => input_arities.next().unwrap() + scalars.len(), - FlatMap { func, .. } => { - input_arities.next().unwrap() + func.output_type().column_types.len() - } + FlatMap { func, .. } => input_arities.next().unwrap() + func.output_arity(), Join { .. 
} => input_arities.sum(), Reduce { input: _, diff --git a/src/expr/src/relation/func.rs b/src/expr/src/relation/func.rs index 66523b029bc5c..d271003fcf584 100644 --- a/src/expr/src/relation/func.rs +++ b/src/expr/src/relation/func.rs @@ -21,8 +21,8 @@ use itertools::{Either, Itertools}; use mz_lowertest::MzReflect; use mz_ore::cast::CastFrom; -use mz_ore::soft_assert_or_log; use mz_ore::str::separated; +use mz_ore::{soft_assert_eq_no_log, soft_assert_or_log}; use mz_proto::{IntoRustIfSome, ProtoType, RustType, TryFromProtoError}; use mz_repr::adt::array::ArrayDimension; use mz_repr::adt::date::Date; @@ -31,7 +31,8 @@ use mz_repr::adt::numeric::{self, Numeric, NumericMaxScale}; use mz_repr::adt::regex::Regex as ReprRegex; use mz_repr::adt::timestamp::{CheckedTimestamp, TimestampLike}; use mz_repr::{ - ColumnName, ColumnType, Datum, Diff, RelationType, Row, RowArena, ScalarType, SharedRow, + ColumnName, ColumnType, Datum, Diff, RelationType, Row, RowArena, RowPacker, ScalarType, + SharedRow, datum_size, }; use num::{CheckedAdd, Integer, Signed, ToPrimitive}; use ordered_float::OrderedFloat; @@ -51,7 +52,7 @@ use crate::explain::{HumanizedExpr, HumanizerMode}; use crate::relation::proto_aggregate_func::{ self, ProtoColumnOrders, ProtoFusedValueWindowFunc, ProtoFusedWindowAggregate, }; -use crate::relation::proto_table_func::ProtoTabletizedScalar; +use crate::relation::proto_table_func::{ProtoTabletizedScalar, ProtoWithOrdinality}; use crate::relation::{ ColumnOrder, ProtoAggregateFunc, ProtoTableFunc, WindowFrame, WindowFrameBound, WindowFrameUnits, compare_columns, proto_table_func, @@ -3715,9 +3716,10 @@ fn mz_acl_explode<'a>( Ok(res.into_iter()) } -#[derive( - Arbitrary, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect, -)] +/// Don't add table functions that emit negative diffs! These are undefined with `WITH ORDINALITY`. 
+/// (`Repeat` _can_ emit negative diffs, on which `WITH ORDINALITY` panics, but this is in +/// mz_unsafe, so not callable by users.) +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect)] pub enum TableFunc { AclExplode, MzAclExplode, @@ -3760,6 +3762,49 @@ pub enum TableFunc { relation: RelationType, }, RegexpMatches, + /// This is a special table function that is used to implement the `WITH ORDINALITY` clause: + /// When the user does + /// `f(...) WITH ORDINALITY`, + /// we plan this as + /// `TableFunc::WithOrdinality { inner: f(...) }` + /// + /// WITH ORDINALITY means that we append an extra output column, whose value is 1,2,3,... for + /// the output rows corresponding to a call of the table function on one input row. For example, + /// WITH ORDINALITY numbers the elements of a list when calling unnest_list. + WithOrdinality { + inner: Box, + }, +} + +/// Manual `Arbitrary`, because proptest-derive is choking on the recursive `WithOrdinality` +/// variant. 
+impl Arbitrary for TableFunc { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + let leaf = Union::new(vec![ + Just(TableFunc::AclExplode), + Just(TableFunc::MzAclExplode), + Just(TableFunc::JsonbObjectKeys), + Just(TableFunc::GenerateSeriesInt32), + Just(TableFunc::GenerateSeriesInt64), + Just(TableFunc::GenerateSeriesTimestamp), + Just(TableFunc::GenerateSeriesTimestampTz), + Just(TableFunc::Repeat), + Just(TableFunc::GenerateSubscriptsArray), + Just(TableFunc::RegexpMatches), + ]) + .boxed(); + + // recursive WithOrdinality variant + leaf.prop_recursive(2, 256, 2, |inner| { + inner.clone().prop_map(|tf| TableFunc::WithOrdinality { + inner: Box::new(tf), + }) + }) + .boxed() + } } impl RustType for TableFunc { @@ -3795,6 +3840,11 @@ impl RustType for TableFunc { }) } TableFunc::RegexpMatches => Kind::RegexpMatches(()), + TableFunc::WithOrdinality { inner } => { + Kind::WithOrdinality(Box::new(ProtoWithOrdinality { + inner: Some(inner.into_proto()), + })) + } }), } } @@ -3840,6 +3890,11 @@ impl RustType for TableFunc { .into_rust_if_some("ProtoTabletizedScalar::relation")?, }, Kind::RegexpMatches(_) => TableFunc::RegexpMatches, + Kind::WithOrdinality(inner) => TableFunc::WithOrdinality { + inner: inner + .inner + .into_rust_if_some("ProtoWithOrdinality::inner")?, + }, }) } } @@ -3920,6 +3975,7 @@ impl TableFunc { Ok(Box::new(std::iter::once((r, Diff::ONE)))) } TableFunc::RegexpMatches => Ok(Box::new(regexp_matches(datums)?)), + TableFunc::WithOrdinality { inner } => with_ordinality(inner, datums, temp_storage), } } @@ -4055,8 +4111,18 @@ impl TableFunc { (column_types, keys) } + TableFunc::WithOrdinality { inner } => { + let mut typ = inner.output_type(); + // Add the ordinality column. + typ.column_types.push(ScalarType::Int64.nullable(false)); + // The ordinality column is always a key. 
+ typ.keys.push(vec![typ.column_types.len() - 1]); + (typ.column_types, typ.keys) + } }; + soft_assert_eq_no_log!(column_types.len(), self.output_arity()); + if !keys.is_empty() { RelationType::new(column_types).with_keys(keys) } else { @@ -4085,6 +4151,7 @@ impl TableFunc { TableFunc::Wrap { width, .. } => *width, TableFunc::TabletizedScalar { relation, .. } => relation.column_types.len(), TableFunc::RegexpMatches => 1, + TableFunc::WithOrdinality { inner } => inner.output_arity() + 1, } } @@ -4109,6 +4176,7 @@ impl TableFunc { | TableFunc::RegexpMatches => true, TableFunc::Wrap { .. } => false, TableFunc::TabletizedScalar { .. } => false, + TableFunc::WithOrdinality { inner } => inner.empty_on_null_input(), } } @@ -4136,6 +4204,7 @@ impl TableFunc { TableFunc::Wrap { .. } => true, TableFunc::TabletizedScalar { .. } => true, TableFunc::RegexpMatches => true, + TableFunc::WithOrdinality { inner } => inner.preserves_monotonicity(), } } } @@ -4162,10 +4231,53 @@ impl fmt::Display for TableFunc { TableFunc::Wrap { width, .. } => write!(f, "wrap{}", width), TableFunc::TabletizedScalar { name, .. } => f.write_str(name), TableFunc::RegexpMatches => write!(f, "regexp_matches(_, _, _)"), + TableFunc::WithOrdinality { inner } => write!(f, "{}[with_ordinality]", inner), } } } +pub fn with_ordinality<'a>( + inner: &'a TableFunc, + datums: &'a [Datum<'a>], + temp_storage: &'a RowArena, +) -> Result + 'a>, EvalError> { + // zip 1, 2, 3, 4, ... to the output of the table function. We need to blow up non-1 diffs, + // because the ordinality column will have different values for each copy. + let mut next_ordinal: i64 = 1; + let it = inner + .eval(datums, temp_storage)? + .flat_map(move |(mut row, diff)| { + let diff = diff.into_inner(); + // WITH ORDINALITY is not well-defined for negative diffs. This is ok, since the + // only table function that can emit negative diffs is `repeat_row`, which is in + // `mz_unsafe`, so users can never call it. 
+ // + // (We also don't need to worry about negative diffs in FlatMap's input, because + // the diff of the input of the FlatMap is factored in after we return from here.) + assert!(diff >= 0); + // The ordinals that will be associated with this row. + let mut ordinals = next_ordinal..(next_ordinal + diff); + next_ordinal += diff; + // The maximum byte capacity we need for the original row and its ordinal. + let cap = row.data_len() + datum_size(&Datum::Int64(next_ordinal)); + iter::from_fn(move || { + let ordinal = ordinals.next()?; + let mut row = if ordinals.is_empty() { + // This is the last row, so no need to clone. (Most table functions emit + // only 1 diffs, so this completely avoids cloning in most cases.) + std::mem::take(&mut row) + } else { + let mut new_row = Row::with_capacity(cap); + new_row.clone_from(&row); + new_row + }; + RowPacker::for_existing_row(&mut row).push(Datum::Int64(ordinal)); + Some((row, Diff::ONE)) + }) + }); + Ok(Box::new(it)) +} + #[cfg(test)] mod tests { use super::{AggregateFunc, ProtoAggregateFunc, ProtoTableFunc, TableFunc}; diff --git a/src/materialized/BUILD.bazel b/src/materialized/BUILD.bazel index a597e0aac69fc..82bc27db80da5 100644 --- a/src/materialized/BUILD.bazel +++ b/src/materialized/BUILD.bazel @@ -34,7 +34,7 @@ rust_binary( "@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/clusterd:mz_clusterd", "//src/environmentd:mz_environmentd", diff --git a/src/materialized/Cargo.toml b/src/materialized/Cargo.toml index a333d71e46817..8f2c8b4a4b818 100644 --- a/src/materialized/Cargo.toml +++ b/src/materialized/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-materialized" description = "Materialize's unified binary." 
-version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/orchestrator-kubernetes/src/cloud_resource_controller.rs b/src/orchestrator-kubernetes/src/cloud_resource_controller.rs index c2cf8349b24c2..b26fdc28b117f 100644 --- a/src/orchestrator-kubernetes/src/cloud_resource_controller.rs +++ b/src/orchestrator-kubernetes/src/cloud_resource_controller.rs @@ -105,52 +105,55 @@ impl CloudResourceController for KubernetesOrchestrator { } async fn watch_vpc_endpoints(&self) -> BoxStream<'static, VpcEndpointEvent> { - let stream = watcher(self.vpc_endpoint_api.clone(), watcher::Config::default()) - .touched_objects() - .filter_map(|object| async move { - match object { - Ok(vpce) => { - let connection_id = - mz_cloud_resources::id_from_vpc_endpoint_name(&vpce.name_any())?; + let stream = watcher( + self.vpc_endpoint_api.clone(), + // This watcher timeout must be shorter than the client read timeout. + watcher::Config::default().timeout(59), + ) + .touched_objects() + .filter_map(|object| async move { + match object { + Ok(vpce) => { + let connection_id = + mz_cloud_resources::id_from_vpc_endpoint_name(&vpce.name_any())?; - if let Some(state) = vpce.status.as_ref().and_then(|st| st.state.to_owned()) - { - Some(VpcEndpointEvent { - connection_id, - status: state, - // Use the 'Available' Condition on the VPCE Status to set the event-time, falling back - // to now if it's not set - time: vpce - .status - .unwrap() - .conditions - .and_then(|c| c.into_iter().find(|c| &c.type_ == "Available")) - .and_then(|condition| Some(condition.last_transition_time.0)) - .unwrap_or_else(Utc::now), - }) - } else { - // The Status/State is not yet populated on the VpcEndpoint, which means it was just - // initialized and hasn't yet been reconciled by the environment-controller - // We return an event with an 'unknown' state so that watchers know the VpcEndpoint was created - // even if we don't yet have an accurate 
status - Some(VpcEndpointEvent { - connection_id, - status: VpcEndpointState::Unknown, - time: vpce.creation_timestamp()?.0, - }) - } - // TODO: Should we also check for the deletion_timestamp on the vpce? That would indicate that the - // resource is about to be deleted; however there is already a 'deleted' enum val on VpcEndpointState - // which refers to the state of the customer's VPC Endpoint Service, so we'd need to introduce a new state val - } - Err(error) => { - // We assume that errors returned by Kubernetes are usually transient, so we - // just log a warning and ignore them otherwise. - tracing::warn!("vpc endpoint watch error: {error}"); - None + if let Some(state) = vpce.status.as_ref().and_then(|st| st.state.to_owned()) { + Some(VpcEndpointEvent { + connection_id, + status: state, + // Use the 'Available' Condition on the VPCE Status to set the event-time, falling back + // to now if it's not set + time: vpce + .status + .unwrap() + .conditions + .and_then(|c| c.into_iter().find(|c| &c.type_ == "Available")) + .and_then(|condition| Some(condition.last_transition_time.0)) + .unwrap_or_else(Utc::now), + }) + } else { + // The Status/State is not yet populated on the VpcEndpoint, which means it was just + // initialized and hasn't yet been reconciled by the environment-controller + // We return an event with an 'unknown' state so that watchers know the VpcEndpoint was created + // even if we don't yet have an accurate status + Some(VpcEndpointEvent { + connection_id, + status: VpcEndpointState::Unknown, + time: vpce.creation_timestamp()?.0, + }) } + // TODO: Should we also check for the deletion_timestamp on the vpce? 
That would indicate that the + // resource is about to be deleted; however there is already a 'deleted' enum val on VpcEndpointState + // which refers to the state of the customer's VPC Endpoint Service, so we'd need to introduce a new state val + } + Err(error) => { + // We assume that errors returned by Kubernetes are usually transient, so we + // just log a warning and ignore them otherwise. + tracing::warn!("vpc endpoint watch error: {error}"); + None } - }); + } + }); Box::pin(stream) } diff --git a/src/orchestrator-kubernetes/src/lib.rs b/src/orchestrator-kubernetes/src/lib.rs index e40a661cedea0..c328e4ad7ea01 100644 --- a/src/orchestrator-kubernetes/src/lib.rs +++ b/src/orchestrator-kubernetes/src/lib.rs @@ -73,6 +73,8 @@ pub struct KubernetesOrchestratorConfig { pub context: String, /// The name of a non-default Kubernetes scheduler to use, if any. pub scheduler_name: Option, + /// Annotations to install on every service created by the orchestrator. + pub service_annotations: BTreeMap, /// Labels to install on every service created by the orchestrator. pub service_labels: BTreeMap, /// Node selector to install on every service created by the orchestrator. @@ -379,7 +381,8 @@ impl NamespacedKubernetesOrchestrator { "environmentd.materialize.cloud/namespace={}", self.namespace ); - watcher::Config::default().labels(&ns_selector) + // This watcher timeout must be shorter than the client read timeout. 
+ watcher::Config::default().timeout(59).labels(&ns_selector) } /// Convert a higher-level label key to the actual one we @@ -561,6 +564,7 @@ impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator { cpu_limit, scale, labels: labels_in, + annotations: annotations_in, availability_zones, other_replicas_selector, replicas_selector, @@ -599,10 +603,14 @@ impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator { // `StatefulSet` is created is not permitted by Kubernetes, and we're // not yet smart enough to handle deleting and recreating the // `StatefulSet`. - let match_labels = btreemap! { + let mut match_labels = btreemap! { "environmentd.materialize.cloud/namespace".into() => self.namespace.clone(), "environmentd.materialize.cloud/service-id".into() => id.into(), }; + for (key, value) in &self.config.service_labels { + match_labels.insert(key.clone(), value.clone()); + } + let mut labels = match_labels.clone(); for (key, value) in labels_in { labels.insert(self.make_label_key(&key), value); @@ -616,9 +624,6 @@ impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator { "true".into(), ); } - for (key, value) in &self.config.service_labels { - labels.insert(key.clone(), value.clone()); - } let mut limits = BTreeMap::new(); let mut requests = BTreeMap::new(); if let Some(memory_limit) = memory_limit { @@ -833,6 +838,10 @@ impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator { // It's called do-not-disrupt in newer versions of karpenter, so adding for forward/backward compatibility "karpenter.sh/do-not-disrupt".to_owned() => "true".to_string(), }; + for (key, value) in annotations_in { + // We want to use the same prefix as our labels keys + pod_annotations.insert(self.make_label_key(&key), value); + } if self.config.enable_prometheus_scrape_annotations { if let Some(internal_http_port) = ports_in .iter() @@ -846,6 +855,9 @@ impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator { 
pod_annotations.insert("prometheus.io/scheme".to_owned(), "http".to_string()); } } + for (key, value) in &self.config.service_annotations { + pod_annotations.insert(key.clone(), value.clone()); + } let default_node_selector = if disk { vec![("materialize.cloud/disk".to_string(), disk.to_string())] diff --git a/src/orchestrator/src/lib.rs b/src/orchestrator/src/lib.rs index 8c0ac2ac72bcd..ef54c5d3cadb7 100644 --- a/src/orchestrator/src/lib.rs +++ b/src/orchestrator/src/lib.rs @@ -215,6 +215,11 @@ pub struct ServiceConfig { /// /// The orchestrator backend may apply a prefix to the key if appropriate. pub labels: BTreeMap, + /// Arbitrary key–value pairs to attach to the service as annotations in the + /// orchestrator backend. + /// + /// The orchestrator backend may apply a prefix to the key if appropriate. + pub annotations: BTreeMap, /// The availability zones the service can be run in. If no availability /// zones are specified, the orchestrator is free to choose one. pub availability_zones: Option>, diff --git a/src/orchestratord/BUILD.bazel b/src/orchestratord/BUILD.bazel index 34b51c91cc023..132bbe0e4238f 100644 --- a/src/orchestratord/BUILD.bazel +++ b/src/orchestratord/BUILD.bazel @@ -32,7 +32,7 @@ rust_library( proc_macro_deps = [] + all_crate_deps(proc_macro = True), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/alloc:mz_alloc", "//src/alloc-default:mz_alloc_default", @@ -71,7 +71,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/alloc:mz_alloc", "//src/alloc-default:mz_alloc_default", @@ -128,7 +128,7 @@ rust_binary( "@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_orchestratord", "//src/alloc:mz_alloc", diff --git a/src/orchestratord/Cargo.toml b/src/orchestratord/Cargo.toml index 7a305706e1127..83af6d2e8a744 
100644 --- a/src/orchestratord/Cargo.toml +++ b/src/orchestratord/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-orchestratord" description = "Kubernetes operator for Materialize regions" -version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/orchestratord/src/controller/materialize.rs b/src/orchestratord/src/controller/materialize.rs index 7a396a005237a..f626136d2c3b3 100644 --- a/src/orchestratord/src/controller/materialize.rs +++ b/src/orchestratord/src/controller/materialize.rs @@ -263,10 +263,6 @@ impl Context { config.aws_info.aws_account_id.is_some(), "--aws-account-id is required when using --cloud-provider=aws" ); - assert!( - config.aws_info.environmentd_iam_role_arn.is_some(), - "--environmentd-iam-role-arn is required when using --cloud-provider=aws" - ); } Self { diff --git a/src/orchestratord/src/controller/materialize/environmentd.rs b/src/orchestratord/src/controller/materialize/environmentd.rs index 3a9b366429d0b..e5f5929c2e21e 100644 --- a/src/orchestratord/src/controller/materialize/environmentd.rs +++ b/src/orchestratord/src/controller/materialize/environmentd.rs @@ -45,7 +45,7 @@ use reqwest::StatusCode; use semver::{BuildMetadata, Prerelease, Version}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; -use tracing::trace; +use tracing::{trace, warn}; use super::matching_image_from_environmentd_image_ref; use crate::controller::materialize::tls::{create_certificate, issuer_ref_defined}; @@ -72,6 +72,13 @@ static V147_DEV0: LazyLock = LazyLock::new(|| Version { pre: Prerelease::new("dev.0").expect("dev.0 is valid prerelease"), build: BuildMetadata::new("").expect("empty string is valid buildmetadata"), }); +static V154_DEV0: LazyLock = LazyLock::new(|| Version { + major: 0, + minor: 154, + patch: 0, + pre: Prerelease::new("dev.0").expect("dev.0 is valid prerelease"), + build: BuildMetadata::new("").expect("empty string is valid buildmetadata"), +}); 
/// Describes the status of a deployment. /// @@ -108,7 +115,7 @@ pub struct ConnectionInfo { pub struct Resources { pub generation: u64, pub environmentd_network_policies: Vec, - pub service_account: Box, + pub service_account: Box>, pub role: Box, pub role_binding: Box, pub public_service: Box, @@ -183,8 +190,10 @@ impl Resources { apply_resource(&environmentd_network_policy_api, policy).await?; } - trace!("applying environmentd service account"); - apply_resource(&service_account_api, &*self.service_account).await?; + if let Some(service_account) = &*self.service_account { + trace!("applying environmentd service account"); + apply_resource(&service_account_api, service_account).await?; + } trace!("applying environmentd role"); apply_resource(&role_api, &*self.role).await?; @@ -638,27 +647,42 @@ fn create_environmentd_network_policies( fn create_service_account_object( config: &super::MaterializeControllerArgs, mz: &Materialize, -) -> ServiceAccount { - let annotations = if config.cloud_provider == CloudProvider::Aws { - let role_arn = mz +) -> Option { + if mz.create_service_account() { + let mut annotations: BTreeMap = mz .spec - .environmentd_iam_role_arn - .as_deref() - .or(config.aws_info.environmentd_iam_role_arn.as_deref()) - .unwrap() - .to_string(); - Some(btreemap! { - "eks.amazonaws.com/role-arn".to_string() => role_arn + .service_account_annotations + .clone() + .unwrap_or_default(); + if let (CloudProvider::Aws, Some(role_arn)) = ( + config.cloud_provider, + mz.spec + .environmentd_iam_role_arn + .as_deref() + .or(config.aws_info.environmentd_iam_role_arn.as_deref()), + ) { + warn!( + "Use of Materialize.spec.environmentd_iam_role_arn is deprecated. Please set \"eks.amazonaws.com/role-arn\" in Materialize.spec.service_account_annotations instead." 
+ ); + annotations.insert( + "eks.amazonaws.com/role-arn".to_string(), + role_arn.to_string(), + ); + }; + + let mut labels = mz.default_labels(); + labels.extend(mz.spec.service_account_labels.clone().unwrap_or_default()); + + Some(ServiceAccount { + metadata: ObjectMeta { + annotations: Some(annotations), + labels: Some(labels), + ..mz.managed_resource_meta(mz.service_account_name()) + }, + ..Default::default() }) } else { None - }; - ServiceAccount { - metadata: ObjectMeta { - annotations, - ..mz.managed_resource_meta(mz.service_account_name()) - }, - ..Default::default() } } @@ -1107,11 +1131,30 @@ fn create_environmentd_statefulset_object( scheduler_name )); } - for (key, val) in mz.default_labels() { - args.push(format!( - "--orchestrator-kubernetes-service-label={key}={val}" - )); + if mz.meets_minimum_version(&V154_DEV0) { + args.extend( + mz.spec + .pod_annotations + .as_ref() + .map(|annotations| annotations.iter()) + .unwrap_or_default() + .map(|(key, val)| { + format!("--orchestrator-kubernetes-service-annotation={key}={val}") + }), + ); } + args.extend( + mz.default_labels() + .iter() + .chain( + mz.spec + .pod_labels + .as_ref() + .map(|labels| labels.iter()) + .unwrap_or_default(), + ) + .map(|(key, val)| format!("--orchestrator-kubernetes-service-label={key}={val}")), + ); if let Some(status) = &mz.status { args.push(format!( "--orchestrator-kubernetes-name-prefix=mz{}-", @@ -1421,6 +1464,14 @@ fn create_environmentd_statefulset_object( mz.environmentd_app_name(), ); pod_template_labels.insert("app".to_owned(), "environmentd".to_string()); + pod_template_labels.extend( + mz.spec + .pod_labels + .as_ref() + .map(|labels| labels.iter()) + .unwrap_or_default() + .map(|(key, value)| (key.clone(), value.clone())), + ); let mut pod_template_annotations = btreemap! 
{ // We can re-enable eviction once we have HA @@ -1456,6 +1507,14 @@ fn create_environmentd_statefulset_object( "/metrics/mz_storage".to_string(), ); } + pod_template_annotations.extend( + mz.spec + .pod_annotations + .as_ref() + .map(|annotations| annotations.iter()) + .unwrap_or_default() + .map(|(key, value)| (key.clone(), value.clone())), + ); let mut tolerations = vec![ // When the node becomes `NotReady` it indicates there is a problem with the node, diff --git a/src/persist-client/BUILD.bazel b/src/persist-client/BUILD.bazel index 32cc447192514..c2a46b1851878 100644 --- a/src/persist-client/BUILD.bazel +++ b/src/persist-client/BUILD.bazel @@ -30,7 +30,7 @@ rust_library( proc_macro_deps = ["//src/persist-proc:mz_persist_proc"] + all_crate_deps(proc_macro = True), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_persist_client_build_script", "//src/build-info:mz_build_info", @@ -70,7 +70,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/build-info:mz_build_info", "//src/dyncfg:mz_dyncfg", diff --git a/src/persist-client/Cargo.toml b/src/persist-client/Cargo.toml index e20529182010a..e6c2cba295d7d 100644 --- a/src/persist-client/Cargo.toml +++ b/src/persist-client/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-persist-client" description = "Client for Materialize pTVC durability system" -version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/persist-client/src/cfg.rs b/src/persist-client/src/cfg.rs index 3a3d6fd5ee078..0b2d834e77065 100644 --- a/src/persist-client/src/cfg.rs +++ b/src/persist-client/src/cfg.rs @@ -38,6 +38,8 @@ use crate::read::READER_LEASE_DURATION; const SELF_MANAGED_VERSIONS: &[Version] = &[ // 25.1 Version::new(0, 130, 0), + // 25.2 + Version::new(0, 147, 0), ]; /// The tunable knobs for persist. 
diff --git a/src/postgres-util/src/tunnel.rs b/src/postgres-util/src/tunnel.rs index 31754e0fb81f5..27d6775319bd9 100644 --- a/src/postgres-util/src/tunnel.rs +++ b/src/postgres-util/src/tunnel.rs @@ -14,14 +14,13 @@ use std::time::Duration; use mz_ore::future::{InTask, OreFutureExt}; use mz_ore::option::OptionExt; -use mz_ore::task; +use mz_ore::task::{self, AbortOnDropHandle}; use mz_proto::{RustType, TryFromProtoError}; use mz_repr::CatalogItemId; use mz_ssh_util::tunnel::{SshTimeoutConfig, SshTunnelConfig}; use mz_ssh_util::tunnel_manager::SshTunnelManager; use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; -use tokio::io::{AsyncRead, AsyncWrite}; use tokio::net::TcpStream as TokioTcpStream; use tokio_postgres::config::{Host, ReplicationMode}; use tokio_postgres::tls::MakeTlsConnect; @@ -69,40 +68,10 @@ pub const DEFAULT_SNAPSHOT_STATEMENT_TIMEOUT: Duration = Duration::ZERO; /// A wrapper for [`tokio_postgres::Client`] that can report the server version. pub struct Client { inner: tokio_postgres::Client, - server_version: Option, -} - -impl Client { - fn new( - client: tokio_postgres::Client, - connection: &tokio_postgres::Connection, - ) -> Client - where - S: AsyncRead + AsyncWrite + Unpin, - T: AsyncRead + AsyncWrite + Unpin, - { - let server_version = connection - .parameter("server_version") - .map(|v| v.to_string()); - Client { - inner: client, - server_version, - } - } - - /// Reports the value of the `server_version` parameter reported by the - /// server. - pub fn server_version(&self) -> Option<&str> { - self.server_version.as_deref() - } - - /// Reports the postgres flavor as indicated by the server version. 
- pub fn server_flavor(&self) -> PostgresFlavor { - match self.server_version.as_ref() { - Some(v) if v.contains("-YB-") => PostgresFlavor::Yugabyte, - _ => PostgresFlavor::Vanilla, - } - } + // Holds a handle to the task with the connection to ensure that when + // the client is dropped, the task can be aborted to close the connection. + // This is also useful for maintaining the lifetimes of dependent object (e.g. ssh tunnel). + _connection_handle: AbortOnDropHandle<()>, } impl Deref for Client { @@ -287,8 +256,16 @@ impl Config { let (client, connection) = async move { postgres_config.connect(tls).await } .run_in_task_if(self.in_task, || "pg_connect".to_string()) .await?; - let client = Client::new(client, &connection); - task::spawn(|| task_name, connection); + + let client = Client { + inner: client, + _connection_handle: task::spawn(|| task_name, async { + if let Err(e) = connection.await { + warn!("postgres direct connection failed: {e}"); + } + }) + .abort_on_drop(), + }; Ok(client) } TunnelConfig::Ssh { config } => { @@ -319,14 +296,17 @@ impl Config { async move { postgres_config.connect_raw(tcp_stream, tls).await } .run_in_task_if(self.in_task, || "pg_connect".to_string()) .await?; - let client = Client::new(client, &connection); - task::spawn(|| task_name, async { - let _tunnel = tunnel; // Keep SSH tunnel alive for duration of connection. - if let Err(e) = connection.await { - warn!("postgres connection failed: {e}"); - } - }); + let client = Client { + inner: client, + _connection_handle: task::spawn(|| task_name, async { + let _tunnel = tunnel; // Keep SSH tunnel alive for duration of connection. 
+ if let Err(e) = connection.await { + warn!("postgres via SSH tunnel connection failed: {e}"); + } + }) + .abort_on_drop(), + }; Ok(client) } TunnelConfig::AwsPrivatelink { connection_id } => { @@ -363,8 +343,16 @@ impl Config { let (client, connection) = async move { postgres_config.connect(tls).await } .run_in_task_if(self.in_task, || "pg_connect".to_string()) .await?; - let client = Client::new(client, &connection); - task::spawn(|| task_name, connection); + + let client = Client { + inner: client, + _connection_handle: task::spawn(|| task_name, async { + if let Err(e) = connection.await { + warn!("postgres AWS link connection failed: {e}"); + } + }) + .abort_on_drop(), + }; Ok(client) } } diff --git a/src/segment/src/lib.rs b/src/segment/src/lib.rs index 2d4095b46510a..f5abd93585a56 100644 --- a/src/segment/src/lib.rs +++ b/src/segment/src/lib.rs @@ -82,6 +82,23 @@ impl Client { Client { client_side, tx } } + /// Creates a new dummy client that doesn't report anything but still + /// accepts updates. + pub fn new_dummy_client() -> Client { + let (tx, mut rx) = mpsc::channel(MAX_PENDING_EVENTS); + + mz_ore::task::spawn(|| "segment_send_task", async move { + while let Some(msg) = rx.recv().await { + tracing::debug!(?msg, "segment update"); + } + }); + + Client { + client_side: true, + tx, + } + } + /// Sends a new [track event] to Segment. /// /// Delivery happens asynchronously on a background thread. 
It is best diff --git a/src/sql-parser/src/ast/defs/query.rs b/src/sql-parser/src/ast/defs/query.rs index 969454a77db1a..fd7afe1c4aef5 100644 --- a/src/sql-parser/src/ast/defs/query.rs +++ b/src/sql-parser/src/ast/defs/query.rs @@ -600,13 +600,13 @@ impl AstDisplay for TableFactor { with_ordinality, } => { f.write_node(function); + if *with_ordinality { + f.write_str(" WITH ORDINALITY"); + } if let Some(alias) = &alias { f.write_str(" AS "); f.write_node(alias); } - if *with_ordinality { - f.write_str(" WITH ORDINALITY"); - } } TableFactor::RowsFrom { functions, @@ -616,13 +616,13 @@ impl AstDisplay for TableFactor { f.write_str("ROWS FROM ("); f.write_node(&display::comma_separated(functions)); f.write_str(")"); + if *with_ordinality { + f.write_str(" WITH ORDINALITY"); + } if let Some(alias) = alias { f.write_str(" AS "); f.write_node(alias); } - if *with_ordinality { - f.write_str(" WITH ORDINALITY"); - } } TableFactor::Derived { lateral, diff --git a/src/sql-parser/src/parser.rs b/src/sql-parser/src/parser.rs index ee538e78e1194..b6101be0fb6f4 100644 --- a/src/sql-parser/src/parser.rs +++ b/src/sql-parser/src/parser.rs @@ -8125,8 +8125,7 @@ impl<'a> Parser<'a> { let name = self.parse_raw_name()?; self.expect_token(&Token::LParen)?; let args = self.parse_optional_args(false)?; - let alias = self.parse_optional_table_alias()?; - let with_ordinality = self.parse_keywords(&[WITH, ORDINALITY]); + let (with_ordinality, alias) = self.parse_table_function_suffix()?; return Ok(TableFactor::Function { function: Function { name, @@ -8197,8 +8196,7 @@ impl<'a> Parser<'a> { let name = self.parse_raw_name()?; if self.consume_token(&Token::LParen) { let args = self.parse_optional_args(false)?; - let alias = self.parse_optional_table_alias()?; - let with_ordinality = self.parse_keywords(&[WITH, ORDINALITY]); + let (with_ordinality, alias) = self.parse_table_function_suffix()?; Ok(TableFactor::Function { function: Function { name, @@ -8223,8 +8221,7 @@ impl<'a> Parser<'a> { 
self.expect_token(&Token::LParen)?; let functions = self.parse_comma_separated(Parser::parse_named_function)?; self.expect_token(&Token::RParen)?; - let alias = self.parse_optional_table_alias()?; - let with_ordinality = self.parse_keywords(&[WITH, ORDINALITY]); + let (with_ordinality, alias) = self.parse_table_function_suffix()?; Ok(TableFactor::RowsFrom { functions, alias, @@ -8232,6 +8229,26 @@ impl<'a> Parser<'a> { }) } + /// Parses the things that can come after the argument list of a table function call. These are + /// - optional WITH ORDINALITY + /// - optional table alias + /// - optional WITH ORDINALITY again! This is allowed just to keep supporting our earlier buggy + /// order where we allowed WITH ORDINALITY only after the table alias. (Postgres and other + /// systems support it only before the table alias.) + fn parse_table_function_suffix(&mut self) -> Result<(bool, Option), ParserError> { + let with_ordinality_1 = self.parse_keywords(&[WITH, ORDINALITY]); + let alias = self.parse_optional_table_alias()?; + let with_ordinality_2 = self.parse_keywords(&[WITH, ORDINALITY]); + if with_ordinality_1 && with_ordinality_2 { + return parser_err!( + self, + self.peek_prev_pos(), + "WITH ORDINALITY specified twice" + ); + } + Ok((with_ordinality_1 || with_ordinality_2, alias)) + } + fn parse_named_function(&mut self) -> Result, ParserError> { let name = self.parse_raw_name()?; self.parse_function(name) diff --git a/src/sql-parser/tests/testdata/select b/src/sql-parser/tests/testdata/select index cf7398b0505da..2fa32f7c8e05f 100644 --- a/src/sql-parser/tests/testdata/select +++ b/src/sql-parser/tests/testdata/select @@ -1095,6 +1095,34 @@ error: Expected left parenthesis, found EOF SELECT foo FROM LATERAL bar ^ +parse-statement +SELECT foo FROM LATERAL bar(1) WITH ORDINALITY +---- +SELECT foo FROM bar(1) WITH ORDINALITY +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Expr { expr: 
Identifier([Ident("foo")]), alias: None }], from: [TableWithJoins { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("bar")])), args: Args { args: [Value(Number("1"))], order_by: [] }, filter: None, over: None, distinct: false }, alias: None, with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT foo FROM LATERAL bar(1) AS alias +---- +SELECT foo FROM bar(1) AS alias +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Expr { expr: Identifier([Ident("foo")]), alias: None }], from: [TableWithJoins { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("bar")])), args: Args { args: [Value(Number("1"))], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: false }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT foo FROM LATERAL bar(1) WITH ORDINALITY AS alias +---- +SELECT foo FROM bar(1) WITH ORDINALITY AS alias +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Expr { expr: Identifier([Ident("foo")]), alias: None }], from: [TableWithJoins { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("bar")])), args: Args { args: [Value(Number("1"))], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT foo FROM LATERAL 
bar(1) AS alias WITH ORDINALITY +---- +SELECT foo FROM bar(1) WITH ORDINALITY AS alias +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Expr { expr: Identifier([Ident("foo")]), alias: None }], from: [TableWithJoins { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("bar")])), args: Args { args: [Value(Number("1"))], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + parse-statement SELECT 'foo' OFFSET 0 ROWS ---- @@ -1289,6 +1317,34 @@ SELECT * FROM customer LEFT JOIN generate_series(1, customer.id) ON true => Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: Table { name: Name(UnresolvedItemName([Ident("customer")])), alias: None }, joins: [Join { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Identifier([Ident("customer"), Ident("id")])], order_by: [] }, filter: None, over: None, distinct: false }, alias: None, with_ordinality: false }, join_operator: LeftOuter(On(Value(Boolean(true)))) }] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) +parse-statement +SELECT * FROM customer LEFT JOIN LATERAL generate_series(1, customer.id) WITH ORDINALITY ON true +---- +SELECT * FROM customer LEFT JOIN generate_series(1, customer.id) WITH ORDINALITY ON true +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: Table { name: 
Name(UnresolvedItemName([Ident("customer")])), alias: None }, joins: [Join { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Identifier([Ident("customer"), Ident("id")])], order_by: [] }, filter: None, over: None, distinct: false }, alias: None, with_ordinality: true }, join_operator: LeftOuter(On(Value(Boolean(true)))) }] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM customer LEFT JOIN LATERAL generate_series(1, customer.id) AS alias ON true +---- +SELECT * FROM customer LEFT JOIN generate_series(1, customer.id) AS alias ON true +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: Table { name: Name(UnresolvedItemName([Ident("customer")])), alias: None }, joins: [Join { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Identifier([Ident("customer"), Ident("id")])], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: false }, join_operator: LeftOuter(On(Value(Boolean(true)))) }] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM customer LEFT JOIN LATERAL generate_series(1, customer.id) WITH ORDINALITY AS alias ON true +---- +SELECT * FROM customer LEFT JOIN generate_series(1, customer.id) WITH ORDINALITY AS alias ON true +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: Table { name: 
Name(UnresolvedItemName([Ident("customer")])), alias: None }, joins: [Join { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Identifier([Ident("customer"), Ident("id")])], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: true }, join_operator: LeftOuter(On(Value(Boolean(true)))) }] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM customer LEFT JOIN LATERAL generate_series(1, customer.id) AS alias WITH ORDINALITY ON true +---- +SELECT * FROM customer LEFT JOIN generate_series(1, customer.id) WITH ORDINALITY AS alias ON true +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: Table { name: Name(UnresolvedItemName([Ident("customer")])), alias: None }, joins: [Join { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Identifier([Ident("customer"), Ident("id")])], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: true }, join_operator: LeftOuter(On(Value(Boolean(true)))) }] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + parse-statement SELECT * FROM LATERAL ROWS FROM (generate_series(1, 2), generate_series(3, 5)) ---- @@ -1303,6 +1359,20 @@ SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) AS alias => Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { 
relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: false }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) +parse-statement +SELECT * FROM LATERAL ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS alias +---- +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS alias +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM LATERAL ROWS FROM (generate_series(1, 2), generate_series(3, 5)) AS alias WITH ORDINALITY +---- +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS alias +=> +Select(SelectStatement { query: Query { ctes: 
Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: Some(TableAlias { name: Ident("alias"), columns: [], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + parse-statement SELECT * FROM generate_series(1, 2) WITH ORDINALITY ---- @@ -1324,6 +1394,35 @@ SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDI => Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: None, with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) +parse-statement +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) WITH ORDINALITY AS t (letter, position) +---- +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) WITH ORDINALITY AS t (letter, position) +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, 
projection: [Wildcard], from: [TableWithJoins { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("unnest")])), args: Args { args: [Array([Value(String("a")), Value(String("b")), Value(String("c"))])], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("t"), columns: [Ident("letter"), Ident("position")], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +# Weird order; supported only for backcompat reasons +parse-statement +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) AS t (letter, position) WITH ORDINALITY +---- +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) WITH ORDINALITY AS t (letter, position) +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: Function { function: Function { name: Name(UnresolvedItemName([Ident("unnest")])), args: Args { args: [Array([Value(String("a")), Value(String("b")), Value(String("c"))])], order_by: [] }, filter: None, over: None, distinct: false }, alias: Some(TableAlias { name: Ident("t"), columns: [Ident("letter"), Ident("position")], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) WITH ORDINALITY AS t (letter, position) WITH ORDINALITY +---- +error: WITH ORDINALITY specified twice +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) WITH ORDINALITY AS t (letter, position) WITH ORDINALITY + ^ + +parse-statement +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) AS WITH ORDINALITY t (letter, position) WITH ORDINALITY +---- +error: Expected end of statement, found ORDINALITY +SELECT * FROM unnest(ARRAY['a', 'b', 'c']) AS WITH 
ORDINALITY t (letter, position) WITH ORDINALITY + ^ + parse-statement SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) ---- @@ -1331,6 +1430,41 @@ SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) => Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: None, with_ordinality: false }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) +parse-statement +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY +---- +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: None, with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM ROWS FROM 
(generate_series(1, 2), generate_series(3, 5)) AS t (a) +---- +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) AS t (a) +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: Some(TableAlias { name: Ident("t"), columns: [Ident("a")], strict: false }), with_ordinality: false }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS t (a, b) +---- +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS t (a, b) +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: Some(TableAlias { name: Ident("t"), columns: [Ident("a"), Ident("b")], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: 
None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) AS t (a, b) WITH ORDINALITY +---- +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS t (a, b) +=> +Select(SelectStatement { query: Query { ctes: Simple([]), body: Select(Select { distinct: None, projection: [Wildcard], from: [TableWithJoins { relation: RowsFrom { functions: [Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("1")), Value(Number("2"))], order_by: [] }, filter: None, over: None, distinct: false }, Function { name: Name(UnresolvedItemName([Ident("generate_series")])), args: Args { args: [Value(Number("3")), Value(Number("5"))], order_by: [] }, filter: None, over: None, distinct: false }], alias: Some(TableAlias { name: Ident("t"), columns: [Ident("a"), Ident("b")], strict: false }), with_ordinality: true }, joins: [] }], selection: None, group_by: [], having: None, qualify: None, options: [] }), order_by: [], limit: None, offset: None }, as_of: None }) + +parse-statement +SELECT * FROM ROWS FROM () +---- +error: Expected identifier, found right parenthesis +SELECT * FROM ROWS FROM () + ^ + # Ensure parsing AS OF is case-insensitive parse-statement SELECT * FROM data as of now() diff --git a/src/sql-server-util/Cargo.toml b/src/sql-server-util/Cargo.toml index c5253897cb3b4..923416903a4cf 100644 --- a/src/sql-server-util/Cargo.toml +++ b/src/sql-server-util/Cargo.toml @@ -35,7 +35,7 @@ serde = { version = "1.0.218", features = ["derive"] } smallvec = { version = "1.15.1", features = ["union"] } static_assertions = "1.1" thiserror = "2.0.11" -tiberius = { version = "0.12", features = ["chrono", "sql-browser-tokio", "tds73"], default-features = false } +tiberius = { version = "0.12", features = ["chrono", "sql-browser-tokio", "tds73", "native-tls"], default-features = false } timely = "0.21.0" tokio = { version = "1.44.1", features = 
["net"] } tokio-stream = "0.1.17" diff --git a/src/sql-server-util/src/config.rs b/src/sql-server-util/src/config.rs index 2a858a437bd3f..fa7fcaa93e080 100644 --- a/src/sql-server-util/src/config.rs +++ b/src/sql-server-util/src/config.rs @@ -120,3 +120,14 @@ impl From for tiberius::EncryptionLevel { } } } + +/// Policy that dictates validation of the SQL-SERVER certificate. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Arbitrary, Serialize, Deserialize)] +pub enum CertificateValidationPolicy { + /// Don't validate the server's certificate; trust all certificates. + TrustAll, + /// Validate server's certificate using system certificates. + VerifySystem, + /// Validate server's certifiacte using provided CA certificate. + VerifyCA, +} diff --git a/src/sql/src/catalog.rs b/src/sql/src/catalog.rs index 162c71f00336f..0ce8874518a9d 100644 --- a/src/sql/src/catalog.rs +++ b/src/sql/src/catalog.rs @@ -481,9 +481,23 @@ pub trait CatalogSchema { fn privileges(&self) -> &PrivilegeMap; } -/// Attributes belonging to a [`CatalogRole`]. -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd, Arbitrary)] -pub struct RoleAttributes { +/// A modification of a role password in the catalog +#[derive(Debug, Clone, Eq, PartialEq, Arbitrary)] +pub enum PasswordAction { + /// Set a new password. + Set(Password), + /// Remove the existing password. + Clear, + /// Leave the existing password unchanged. + NoChange, +} + +/// A raw representation of attributes belonging to a [`CatalogRole`] that we might +/// get as input from the user. This includes the password. +/// This struct explicity does not implement `Serialize` or `Deserialize` to avoid +/// accidentally serializing passwords. +#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Arbitrary)] +pub struct RoleAttributesRaw { /// Indicates whether the role has inheritance of privileges. pub inherit: bool, /// The raw password of the role. This is for self managed auth, not cloud. 
@@ -496,12 +510,45 @@ pub struct RoleAttributes { _private: (), } +/// Attributes belonging to a [`CatalogRole`]. +#[derive(Debug, Clone, Eq, Serialize, Deserialize, PartialEq, Ord, PartialOrd, Arbitrary)] +pub struct RoleAttributes { + /// Indicates whether the role has inheritance of privileges. + pub inherit: bool, + /// Whether or not this user is a superuser. + pub superuser: Option, + /// Whether this role is login + pub login: Option, + // Force use of constructor. + _private: (), +} + +impl RoleAttributesRaw { + /// Creates a new [`RoleAttributesRaw`] with default attributes. + pub const fn new() -> RoleAttributesRaw { + RoleAttributesRaw { + inherit: true, + password: None, + superuser: None, + login: None, + _private: (), + } + } + + /// Adds all attributes excluding password. + pub const fn with_all(mut self) -> RoleAttributesRaw { + self.inherit = true; + self.superuser = Some(true); + self.login = Some(true); + self + } +} + impl RoleAttributes { /// Creates a new [`RoleAttributes`] with default attributes. pub const fn new() -> RoleAttributes { RoleAttributes { inherit: true, - password: None, superuser: None, login: None, _private: (), @@ -520,25 +567,46 @@ impl RoleAttributes { pub const fn is_inherit(&self) -> bool { self.inherit } +} - /// Returns whether or not the role has a password. - pub const fn has_password(&self) -> bool { - self.password.is_some() +impl From for RoleAttributes { + fn from( + RoleAttributesRaw { + inherit, + superuser, + login, + .. + }: RoleAttributesRaw, + ) -> RoleAttributes { + RoleAttributes { + inherit, + superuser, + login, + _private: (), + } } +} - /// Returns self without the password. - pub fn without_password(self) -> RoleAttributes { +impl From for RoleAttributesRaw { + fn from( RoleAttributes { - inherit: self.inherit, + inherit, + superuser, + login, + .. 
+ }: RoleAttributes, + ) -> RoleAttributesRaw { + RoleAttributesRaw { + inherit, password: None, - superuser: self.superuser, - login: self.login, + superuser, + login, _private: (), } } } -impl From for RoleAttributes { +impl From for RoleAttributesRaw { fn from( PlannedRoleAttributes { inherit, @@ -547,9 +615,9 @@ impl From for RoleAttributes { login, .. }: PlannedRoleAttributes, - ) -> RoleAttributes { - let default_attributes = RoleAttributes::new(); - RoleAttributes { + ) -> RoleAttributesRaw { + let default_attributes = RoleAttributesRaw::new(); + RoleAttributesRaw { inherit: inherit.unwrap_or(default_attributes.inherit), password, superuser, diff --git a/src/sql/src/func.rs b/src/sql/src/func.rs index ebac1098ff6d1..e5d54e1d1e61c 100644 --- a/src/sql/src/func.rs +++ b/src/sql/src/func.rs @@ -460,12 +460,16 @@ fn sql_impl_table_func_inner( let (mut expr, scope) = invoke(ecx.qcx, types)?; expr.splice_parameters(&args, 0); Ok(TableFuncPlan { - expr, + imp: TableFuncImpl::Expr(expr), column_names: scope.column_names().cloned().collect(), }) }) } +/// Implements a table function using SQL. +/// +/// Warning: These implementations are currently defective for WITH ORDINALITY / FROM ROWS, see +/// comment in `plan_table_function_internal`. fn sql_impl_table_func(sql: &'static str) -> Operation { sql_impl_table_func_inner(sql, None) } @@ -1657,10 +1661,49 @@ macro_rules! builtins { #[derive(Debug)] pub struct TableFuncPlan { - pub expr: HirRelationExpr, + pub imp: TableFuncImpl, pub column_names: Vec, } +/// The implementation of a table function is either +/// 1. just a `CallTable` HIR node (in which case we can put `WITH ORDINALITY` into it when we +/// create the actual HIR node in `plan_table_function_internal`), +/// 2. or a general HIR expression. This happens when it's implemented as SQL, i.e., by a call to +/// `sql_impl_table_func_inner`. +/// +/// TODO(ggevay): when a table function in 2. 
is used with WITH ORDINALITY or ROWS FROM, we fall +/// back to the legacy WITH ORDINALITY implementation, which relies on the row_number window +/// function, and is mostly broken. It can give an incorrect ordering, and also has an extreme +/// performance problem in some cases, see +/// +/// +/// These table functions are somewhat exotic, and WITH ORDINALITY / ROWS FROM are also somewhat +/// exotic, so let's hope that the combination of these is so exotic that nobody will need it for +/// quite a while. Note that the SQL standard only allows WITH ORDINALITY on `unnest_...` functions, +/// of which none fall into the 2. category, so we are fine with these; it's only a Postgres +/// extension to support WITH ORDINALITY on arbitrary table functions. When this combination arises, +/// we emit a Sentry error, so that we'll know about it. +/// +/// When we eventually need to fix this, a possible approach would be to write two SQL +/// implementations: one would be their current implementation, and the other would be +/// WITH ORDINALITY. +/// - This will be trivial for some table functions, e.g., those that end with an UNNEST just need a +/// WITH ORDINALITY on this UNNEST (e.g., `regexp_split_to_table`). +/// - `_pg_expandarray` and `date_bin_hopping` also look easy. +/// - `mz_name_rank` and `mz_resolve_object_name` look more complicated but hopefully solvable. +/// +/// Another approach to fixing this would be to add an ORDER BY to the SQL definitions and then +/// move this ORDER BY into a row_number window function call. This would at least solve the +/// correctness problem, but not the performance problem. 
+#[derive(Debug)] +pub enum TableFuncImpl { + CallTable { + func: TableFunc, + exprs: Vec, + }, + Expr(HirRelationExpr), +} + #[derive(Debug)] pub enum Func { Scalar(Vec>), @@ -1782,7 +1825,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "aclexplode" => Table { params!(ScalarType::Array(Box::new(ScalarType::AclItem))) => Operation::unary(move |_ecx, aclitems| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::AclExplode, exprs: vec![aclitems], }, @@ -3234,7 +3277,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "generate_series" => Table { params!(Int32, Int32, Int32) => Operation::variadic(move |_ecx, exprs| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSeriesInt32, exprs, }, @@ -3243,7 +3286,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc }) => ReturnType::set_of(Int32.into()), 1066; params!(Int32, Int32) => Operation::binary(move |_ecx, start, stop| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSeriesInt32, exprs: vec![start, stop, HirScalarExpr::literal(Datum::Int32(1), ScalarType::Int32)], }, @@ -3252,7 +3295,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc }) => ReturnType::set_of(Int32.into()), 1067; params!(Int64, Int64, Int64) => Operation::variadic(move |_ecx, exprs| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSeriesInt64, exprs, }, @@ -3261,7 +3304,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc }) => ReturnType::set_of(Int64.into()), 1068; params!(Int64, Int64) => Operation::binary(move |_ecx, start, stop| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSeriesInt64, exprs: vec![start, stop, HirScalarExpr::literal(Datum::Int64(1), ScalarType::Int64)], }, @@ -3270,7 +3313,7 @@ 
pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc }) => ReturnType::set_of(Int64.into()), 1069; params!(Timestamp, Timestamp, Interval) => Operation::variadic(move |_ecx, exprs| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSeriesTimestamp, exprs, }, @@ -3279,7 +3322,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc }) => ReturnType::set_of(Timestamp.into()), 938; params!(TimestampTz, TimestampTz, Interval) => Operation::variadic(move |_ecx, exprs| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSeriesTimestampTz, exprs, }, @@ -3291,7 +3334,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "generate_subscripts" => Table { params!(ArrayAny, Int32) => Operation::variadic(move |_ecx, exprs| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::GenerateSubscriptsArray, exprs, }, @@ -3303,7 +3346,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "jsonb_array_elements" => Table { params!(Jsonb) => Operation::unary(move |_ecx, jsonb| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::JsonbArrayElements { stringify: false }, exprs: vec![jsonb], }, @@ -3314,7 +3357,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "jsonb_array_elements_text" => Table { params!(Jsonb) => Operation::unary(move |_ecx, jsonb| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::JsonbArrayElements { stringify: true }, exprs: vec![jsonb], }, @@ -3325,7 +3368,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "jsonb_each" => Table { params!(Jsonb) => Operation::unary(move |_ecx, jsonb| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::JsonbEach { stringify: false }, exprs: vec![jsonb], }, @@ -3336,7 
+3379,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "jsonb_each_text" => Table { params!(Jsonb) => Operation::unary(move |_ecx, jsonb| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::JsonbEach { stringify: true }, exprs: vec![jsonb], }, @@ -3347,7 +3390,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc "jsonb_object_keys" => Table { params!(Jsonb) => Operation::unary(move |_ecx, jsonb| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::JsonbObjectKeys, exprs: vec![jsonb], }, @@ -3416,7 +3459,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc params!(String, String) => Operation::variadic(move |_ecx, exprs| { let column_names = vec!["regexp_matches".into()]; Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::RegexpMatches, exprs: vec![exprs[0].clone(), exprs[1].clone()], }, @@ -3426,7 +3469,7 @@ pub static PG_CATALOG_BUILTINS: LazyLock> = LazyLoc params!(String, String, String) => Operation::variadic(move |_ecx, exprs| { let column_names = vec!["regexp_matches".into()]; Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::RegexpMatches, exprs: vec![exprs[0].clone(), exprs[1].clone(), exprs[2].clone()], }, @@ -3533,7 +3576,7 @@ pub static MZ_CATALOG_BUILTINS: LazyLock> = LazyLoc let column_names = (1..=ncols).map(|i| format!("column{}", i).into()).collect(); Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::CsvExtract(ncols), exprs: vec![input], }, @@ -3825,7 +3868,7 @@ pub static MZ_CATALOG_BUILTINS: LazyLock> = LazyLoc sql_bail!("regexp_extract must specify at least one capture group"); } Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::RegexpExtract(regex), exprs: vec![haystack], }, @@ -3837,7 +3880,7 
@@ pub static MZ_CATALOG_BUILTINS: LazyLock> = LazyLoc params!(Int64) => Operation::unary(move |ecx, n| { ecx.require_feature_flag(&crate::session::vars::ENABLE_REPEAT_ROW)?; Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::Repeat, exprs: vec![n], }, @@ -3864,7 +3907,7 @@ pub static MZ_CATALOG_BUILTINS: LazyLock> = LazyLoc vec![ArrayAny] => Operation::unary(move |ecx, e| { let el_typ = ecx.scalar_type(&e).unwrap_array_element_type().clone(); Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::UnnestArray { el_typ }, exprs: vec![e], }, @@ -3876,7 +3919,7 @@ pub static MZ_CATALOG_BUILTINS: LazyLock> = LazyLoc vec![ListAny] => Operation::unary(move |ecx, e| { let el_typ = ecx.scalar_type(&e).unwrap_list_element_type().clone(); Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::UnnestList { el_typ }, exprs: vec![e], }, @@ -3888,7 +3931,7 @@ pub static MZ_CATALOG_BUILTINS: LazyLock> = LazyLoc vec![MapAny] => Operation::unary(move |ecx, e| { let value_type = ecx.scalar_type(&e).unwrap_map_value_type().clone(); Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::UnnestMap { value_type }, exprs: vec![e], }, @@ -3926,7 +3969,7 @@ pub static MZ_INTERNAL_BUILTINS: LazyLock> = LazyLo "mz_aclexplode" => Table { params!(ScalarType::Array(Box::new(ScalarType::MzAclItem))) => Operation::unary(move |_ecx, mz_aclitems| { Ok(TableFuncPlan { - expr: HirRelationExpr::CallTable { + imp: TableFuncImpl::CallTable { func: TableFunc::MzAclExplode, exprs: vec![mz_aclitems], }, diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs index ab665440e74a9..062de6ed0e9c1 100644 --- a/src/sql/src/lib.rs +++ b/src/sql/src/lib.rs @@ -126,6 +126,8 @@ pub const DEFAULT_SCHEMA: &str = "public"; /// The number of concurrent requests we allow at once for webhook sources. 
pub const WEBHOOK_CONCURRENCY_LIMIT: usize = 500; +pub static ORDINALITY_COL_NAME: &str = "ordinality"; + pub mod ast; pub mod catalog; pub mod func; diff --git a/src/sql/src/plan.rs b/src/sql/src/plan.rs index 0c7511e23a2f8..72dd35a49f646 100644 --- a/src/sql/src/plan.rs +++ b/src/sql/src/plan.rs @@ -78,7 +78,7 @@ use crate::ast::{ }; use crate::catalog::{ CatalogType, DefaultPrivilegeAclItem, DefaultPrivilegeObject, IdReference, ObjectType, - RoleAttributes, + RoleAttributesRaw, }; use crate::names::{ Aug, CommentObjectId, DependencyIds, FullItemName, ObjectId, QualifiedItemName, @@ -550,7 +550,7 @@ pub struct CreateSchemaPlan { #[derive(Debug)] pub struct CreateRolePlan { pub name: String, - pub attributes: RoleAttributes, + pub attributes: RoleAttributesRaw, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/src/sql/src/plan/hir.rs b/src/sql/src/plan/hir.rs index fbfdfd6cf49eb..86f4041db3b4f 100644 --- a/src/sql/src/plan/hir.rs +++ b/src/sql/src/plan/hir.rs @@ -1561,7 +1561,7 @@ impl HirRelationExpr { HirRelationExpr::LetRec { body, .. } => body.arity(), HirRelationExpr::Project { outputs, .. } => outputs.len(), HirRelationExpr::Map { input, scalars } => input.arity() + scalars.len(), - HirRelationExpr::CallTable { func, .. } => func.output_arity(), + HirRelationExpr::CallTable { func, exprs: _ } => func.output_arity(), HirRelationExpr::Filter { input, .. } | HirRelationExpr::TopK { input, .. 
} | HirRelationExpr::Distinct { input } diff --git a/src/sql/src/plan/query.rs b/src/sql/src/plan/query.rs index e9e01810eaa4e..de0c41a8c4bb2 100644 --- a/src/sql/src/plan/query.rs +++ b/src/sql/src/plan/query.rs @@ -48,7 +48,8 @@ use std::{iter, mem}; use itertools::Itertools; use mz_expr::virtual_syntax::AlgExcept; use mz_expr::{ - Id, LetRecLimit, LocalId, MapFilterProject, MirScalarExpr, RowSetFinishing, func as expr_func, + Id, LetRecLimit, LocalId, MapFilterProject, MirScalarExpr, RowSetFinishing, TableFunc, + func as expr_func, }; use mz_ore::assert_none; use mz_ore::collections::CollectionExt; @@ -81,11 +82,10 @@ use mz_sql_parser::ast::{ use mz_sql_parser::ident; use crate::catalog::{CatalogItemType, CatalogType, SessionCatalog}; -use crate::func::{self, Func, FuncSpec}; +use crate::func::{self, Func, FuncSpec, TableFuncImpl}; use crate::names::{ Aug, FullItemName, PartialItemName, ResolvedDataType, ResolvedItemName, SchemaSpecifier, }; -use crate::normalize; use crate::plan::PlanError::InvalidWmrRecursionLimit; use crate::plan::error::PlanError; use crate::plan::hir::{ @@ -103,6 +103,7 @@ use crate::plan::{ literal, transform_ast, }; use crate::session::vars::{self, FeatureFlag}; +use crate::{ORDINALITY_COL_NAME, normalize}; #[derive(Debug)] pub struct PlannedRootQuery { @@ -2128,7 +2129,7 @@ fn plan_values( } } let out = HirRelationExpr::CallTable { - func: mz_expr::TableFunc::Wrap { + func: TableFunc::Wrap { width: ncols, types: col_types, }, @@ -2207,7 +2208,7 @@ fn plan_values_insert( } Ok(HirRelationExpr::CallTable { - func: mz_expr::TableFunc::Wrap { + func: TableFunc::Wrap { width: values[0].len(), types, }, @@ -3095,7 +3096,7 @@ fn plan_rows_from( // If `WITH ORDINALITY` was specified, include the coalesced ordinality // column. Otherwise remove it from the scope. 
if with_ordinality { - columns.push(scope.items.len()); + columns.push(offset); } else { scope.items.pop(); } @@ -3146,7 +3147,7 @@ fn plan_rows_from_internal<'a>( left_expr = left_expr.map(vec![HirScalarExpr::column(left_scope.len() - 1)]); left_scope .items - .push(ScopeItem::from_column_name("ordinality")); + .push(ScopeItem::from_column_name(ORDINALITY_COL_NAME)); for function in functions { // The right hand side of a join must be planned in a new scope. @@ -3293,11 +3294,44 @@ fn plan_table_function_internal( item: table_name, }); - let (mut expr, mut scope) = match resolve_func(ecx, name, args)? { + let (expr, mut scope) = match resolve_func(ecx, name, args)? { Func::Table(impls) => { let tf = func::select_impl(ecx, FuncSpec::Func(name), impls, scalar_args, vec![])?; let scope = Scope::from_source(scope_name.clone(), tf.column_names); - (tf.expr, scope) + let expr = match tf.imp { + TableFuncImpl::CallTable { mut func, exprs } => { + if with_ordinality { + func = TableFunc::WithOrdinality { + inner: Box::new(func), + }; + } + HirRelationExpr::CallTable { func, exprs } + } + TableFuncImpl::Expr(expr) => { + if !with_ordinality { + expr + } else { + // The table function is defined in SQL (i.e., TableFuncImpl::Expr), so we + // fall back to the legacy WITH ORDINALITY / ROWS FROM implementation. + // Note that this can give an incorrect ordering, and also has an extreme + // performance problem in some cases. See the doc comment of + // `TableFuncImpl`. 
+ tracing::error!( + %name, + "Using the legacy WITH ORDINALITY / ROWS FROM implementation for a table function", + ); + expr.map(vec![HirScalarExpr::windowing(WindowExpr { + func: WindowExprType::Scalar(ScalarWindowExpr { + func: ScalarWindowFunc::RowNumber, + order_by: vec![], + }), + partition_by: vec![], + order_by: vec![], + })]) + } + } + }; + (expr, scope) } Func::Scalar(impls) => { let expr = func::select_impl(ecx, FuncSpec::Func(name), impls, scalar_args, vec![])?; @@ -3315,9 +3349,15 @@ fn plan_table_function_internal( let scope = Scope::from_source(scope_name.clone(), vec![column_name]); + let mut func = TableFunc::TabletizedScalar { relation, name }; + if with_ordinality { + func = TableFunc::WithOrdinality { + inner: Box::new(func), + }; + } ( HirRelationExpr::CallTable { - func: mz_expr::TableFunc::TabletizedScalar { relation, name }, + func, exprs: vec![expr], }, scope, @@ -3330,14 +3370,6 @@ fn plan_table_function_internal( }; if with_ordinality { - expr = expr.map(vec![HirScalarExpr::windowing(WindowExpr { - func: WindowExprType::Scalar(ScalarWindowExpr { - func: ScalarWindowFunc::RowNumber, - order_by: vec![], - }), - partition_by: vec![], - order_by: vec![], - })]); scope .items .push(ScopeItem::from_name(scope_name, "ordinality")); diff --git a/src/sql/src/plan/statement/ddl/connection.rs b/src/sql/src/plan/statement/ddl/connection.rs index fb2ed51b6f6e3..96952986419c4 100644 --- a/src/sql/src/plan/statement/ddl/connection.rs +++ b/src/sql/src/plan/statement/ddl/connection.rs @@ -547,8 +547,39 @@ impl ConnectionOptionExtracted { }); } - // TODO(sql_server2): Parse the encryption level from the create SQL. 
- let encryption = mz_sql_server_util::config::EncryptionLevel::Preferred; + let (encryption, certificate_validation_policy) = match self + .ssl_mode + .map(|mode| mode.to_uppercase()) + .as_ref() + .map(|mode| mode.as_str()) + { + None | Some("DISABLED") => ( + mz_sql_server_util::config::EncryptionLevel::None, + mz_sql_server_util::config::CertificateValidationPolicy::TrustAll, + ), + Some("REQUIRED") => ( + mz_sql_server_util::config::EncryptionLevel::Required, + mz_sql_server_util::config::CertificateValidationPolicy::TrustAll, + ), + Some("VERIFY") => ( + mz_sql_server_util::config::EncryptionLevel::Required, + mz_sql_server_util::config::CertificateValidationPolicy::VerifySystem, + ), + Some("VERIFY_CA") => { + if self.ssl_certificate_authority.is_none() { + sql_bail!( + "invalid CONNECTION: SSL MODE 'verify_ca' requires SSL CERTIFICATE AUTHORITY" + ); + } + ( + mz_sql_server_util::config::EncryptionLevel::Required, + mz_sql_server_util::config::CertificateValidationPolicy::VerifyCA, + ) + } + Some(mode) => { + sql_bail!("invalid CONNECTION: unknown SSL MODE {}", mode.quoted()) + } + }; // 1433 is the default port for SQL Server instances running over TCP. 
// @@ -573,6 +604,8 @@ impl ConnectionOptionExtracted { .map(|pass| pass.into())?, tunnel, encryption, + certificate_validation_policy, + tls_root_cert: self.ssl_certificate_authority, }) } }; diff --git a/src/sql/src/plan/transform_ast.rs b/src/sql/src/plan/transform_ast.rs index 431f27ba05318..78de311ebff6c 100644 --- a/src/sql/src/plan/transform_ast.rs +++ b/src/sql/src/plan/transform_ast.rs @@ -24,8 +24,8 @@ use mz_sql_parser::ast::{ use mz_sql_parser::ident; use crate::names::{Aug, PartialItemName, ResolvedDataType, ResolvedItemName}; -use crate::normalize; use crate::plan::{PlanError, StatementContext}; +use crate::{ORDINALITY_COL_NAME, normalize}; pub(crate) fn transform(scx: &StatementContext, node: &mut N) -> Result<(), PlanError> where @@ -455,9 +455,8 @@ impl<'a> FuncRewriter<'a> { let ident = normalize::ident(ident[0].clone()); let fn_ident = match ident.as_str() { "current_role" => Some("current_user"), - "current_schema" | "current_timestamp" | "current_user" | "session_user" => { - Some(ident.as_str()) - } + "current_schema" | "current_timestamp" | "current_user" | "session_user" + | "current_catalog" => Some(ident.as_str()), _ => None, }; match fn_ident { @@ -516,7 +515,7 @@ impl<'ast> VisitMut<'ast, Aug> for FuncRewriter<'_> { if *with_ordinality { select = select.project(SelectItem::Expr { expr: Expr::Value(Value::Number("1".into())), - alias: Some(ident!("ordinality")), + alias: Some(ident!(ORDINALITY_COL_NAME)), }); } diff --git a/src/sql/src/session/vars/definitions.rs b/src/sql/src/session/vars/definitions.rs index 71c4e33d4e483..6cec1bafeac81 100644 --- a/src/sql/src/session/vars/definitions.rs +++ b/src/sql/src/session/vars/definitions.rs @@ -1459,7 +1459,7 @@ pub static ENABLE_CONSOLIDATE_AFTER_UNION_NEGATE: VarDefinition = VarDefinition: pub static ENABLE_REDUCE_REDUCTION: VarDefinition = VarDefinition::new( "enable_reduce_reduction", - value!(bool; true), + value!(bool; false), "split complex reductions in to simpler ones and a join 
(Materialize).", true, ); diff --git a/src/sqllogictest/src/ast.rs b/src/sqllogictest/src/ast.rs index 9754745104aa3..0abbc5c20cd78 100644 --- a/src/sqllogictest/src/ast.rs +++ b/src/sqllogictest/src/ast.rs @@ -130,6 +130,7 @@ pub enum Record<'a> { location: Location, conn: Option<&'a str>, user: Option<&'a str>, + password: Option<&'a str>, sql: &'a str, sort: Sort, output: Output, diff --git a/src/sqllogictest/src/parser.rs b/src/sqllogictest/src/parser.rs index 044c36a805756..8edb84471c749 100644 --- a/src/sqllogictest/src/parser.rs +++ b/src/sqllogictest/src/parser.rs @@ -374,6 +374,7 @@ impl<'a> Parser<'a> { let location = self.location(); let mut conn = None; let mut user = None; + let mut password = None; let mut multiline = false; let mut sort = Sort::No; if let Some(options) = words.next() { @@ -382,6 +383,8 @@ impl<'a> Parser<'a> { conn = Some(value); } else if let Some(value) = option.strip_prefix("user=") { user = Some(value); + } else if let Some(value) = option.strip_prefix("password=") { + password = Some(value); } else if option == "rowsort" { sort = Sort::Row; } else if option == "multiline" { @@ -394,6 +397,9 @@ impl<'a> Parser<'a> { if user.is_some() && conn.is_none() { bail!("cannot set user without also setting conn"); } + if password.is_some() && user.is_none() { + bail!("cannot set password without also setting user"); + } let sql = self.split_at(&QUERY_OUTPUT_REGEX)?; let output_str = self .split_at(if multiline { @@ -425,6 +431,7 @@ impl<'a> Parser<'a> { location, conn, user, + password, sql, sort, output, diff --git a/src/sqllogictest/src/runner.rs b/src/sqllogictest/src/runner.rs index 630b39ad55e7e..bb4215bc91483 100644 --- a/src/sqllogictest/src/runner.rs +++ b/src/sqllogictest/src/runner.rs @@ -433,6 +433,7 @@ pub struct Runner<'a> { pub struct RunnerInner<'a> { server_addr: SocketAddr, internal_server_addr: SocketAddr, + password_server_addr: SocketAddr, internal_http_server_addr: SocketAddr, // Drop order matters for these fields. 
client: tokio_postgres::Client, @@ -901,8 +902,10 @@ impl<'a> Runner<'a> { inner.ensure_fixed_features().await?; - inner.client = connect(inner.server_addr, None).await; - inner.system_client = connect(inner.internal_server_addr, Some("mz_system")).await; + inner.client = connect(inner.server_addr, None, None).await.unwrap(); + inner.system_client = connect(inner.internal_server_addr, Some("mz_system"), None) + .await + .unwrap(); inner.clients = BTreeMap::new(); Ok(()) @@ -1025,6 +1028,12 @@ impl<'a> RunnerInner<'a> { allowed_roles: AllowedRoles::Internal, enable_tls: false, }, + "password".to_owned() => SqlListenerConfig { + addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), + authenticator_kind: AuthenticatorKind::Password, + allowed_roles: AllowedRoles::Normal, + enable_tls: false, + }, }, http: btreemap![ "external".to_owned() => HttpListenerConfig { @@ -1147,6 +1156,8 @@ impl<'a> RunnerInner<'a> { storage_usage_retention_period: None, segment_api_key: None, segment_client_side: false, + // SLT doesn't like eternally running tasks since it waits for them to finish inbetween SLT files + test_only_dummy_segment_client: false, egress_addresses: vec![], aws_account_id: None, aws_privatelink_availability_zones: None, @@ -1171,6 +1182,7 @@ impl<'a> RunnerInner<'a> { let (server_addr_tx, server_addr_rx): (oneshot::Sender>, _) = oneshot::channel(); let (internal_server_addr_tx, internal_server_addr_rx) = oneshot::channel(); + let (password_server_addr_tx, password_server_addr_rx) = oneshot::channel(); let (internal_http_server_addr_tx, internal_http_server_addr_rx) = oneshot::channel(); let (shutdown_trigger, shutdown_trigger_rx) = trigger::channel(); let server_thread = thread::spawn(|| { @@ -1198,6 +1210,9 @@ impl<'a> RunnerInner<'a> { internal_server_addr_tx .send(server.sql_listener_handles["internal"].local_addr) .expect("receiver should not drop first"); + password_server_addr_tx + .send(server.sql_listener_handles["password"].local_addr) + 
.expect("receiver should not drop first"); internal_http_server_addr_tx .send(server.http_listener_handles["internal"].local_addr) .expect("receiver should not drop first"); @@ -1205,14 +1220,18 @@ impl<'a> RunnerInner<'a> { }); let server_addr = server_addr_rx.await??; let internal_server_addr = internal_server_addr_rx.await?; + let password_server_addr = password_server_addr_rx.await?; let internal_http_server_addr = internal_http_server_addr_rx.await?; - let system_client = connect(internal_server_addr, Some("mz_system")).await; - let client = connect(server_addr, None).await; + let system_client = connect(internal_server_addr, Some("mz_system"), None) + .await + .unwrap(); + let client = connect(server_addr, None, None).await.unwrap(); let inner = RunnerInner { server_addr, internal_server_addr, + password_server_addr, internal_http_server_addr, _shutdown_trigger: shutdown_trigger, _server_thread: server_thread.join_on_drop(), @@ -1304,14 +1323,23 @@ impl<'a> RunnerInner<'a> { Record::Simple { conn, user, + password, sql, sort, output, location, .. 
} => { - self.run_simple(*conn, *user, sql, sort.clone(), output, location.clone()) - .await + self.run_simple( + *conn, + *user, + *password, + sql, + sort.clone(), + output, + location.clone(), + ) + .await } Record::Copy { table_name, @@ -1747,20 +1775,24 @@ impl<'a> RunnerInner<'a> { &mut self, name: Option<&str>, user: Option<&str>, - ) -> &tokio_postgres::Client { + password: Option<&str>, + ) -> Result<&tokio_postgres::Client, tokio_postgres::Error> { match name { - None => &self.client, + None => Ok(&self.client), Some(name) => { if !self.clients.contains_key(name) { let addr = if matches!(user, Some("mz_system") | Some("mz_support")) { self.internal_server_addr + } else if password.is_some() { + // Use password server for password authentication + self.password_server_addr } else { self.server_addr }; - let client = connect(addr, user).await; + let client = connect(addr, user, password).await?; self.clients.insert(name.into(), client); } - self.clients.get(name).unwrap() + Ok(self.clients.get(name).unwrap()) } } } @@ -1769,46 +1801,53 @@ impl<'a> RunnerInner<'a> { &mut self, conn: Option<&'r str>, user: Option<&'r str>, + password: Option<&'r str>, sql: &'r str, sort: Sort, output: &'r Output, location: Location, ) -> Result, anyhow::Error> { - let client = self.get_conn(conn, user).await; - let actual = Output::Values(match client.simple_query(sql).await { - Ok(result) => { - let mut rows = Vec::new(); - - for m in result.into_iter() { - match m { - SimpleQueryMessage::Row(row) => { - let mut s = vec![]; - for i in 0..row.len() { - s.push(row.get(i).unwrap_or("NULL")); + let actual = match self.get_conn(conn, user, password).await { + Ok(client) => match client.simple_query(sql).await { + Ok(result) => { + let mut rows = Vec::new(); + + for m in result.into_iter() { + match m { + SimpleQueryMessage::Row(row) => { + let mut s = vec![]; + for i in 0..row.len() { + s.push(row.get(i).unwrap_or("NULL")); + } + rows.push(s.join(",")); } - 
rows.push(s.join(",")); - } - SimpleQueryMessage::CommandComplete(count) => { - // This applies any sort on the COMPLETE line as - // well, but we do the same for the expected output. - rows.push(format!("COMPLETE {}", count)); + SimpleQueryMessage::CommandComplete(count) => { + // This applies any sort on the COMPLETE line as + // well, but we do the same for the expected output. + rows.push(format!("COMPLETE {}", count)); + } + SimpleQueryMessage::RowDescription(_) => {} + _ => panic!("unexpected"), } - SimpleQueryMessage::RowDescription(_) => {} - _ => panic!("unexpected"), } - } - if let Sort::Row = sort { - rows.sort(); - } + if let Sort::Row = sort { + rows.sort(); + } - rows + Output::Values(rows) + } + // Errors can contain multiple lines (say if there are details), and rewrite + // sticks them each on their own line, so we need to split up the lines here to + // each be its own String in the Vec. + Err(error) => { + Output::Values(error.to_string().lines().map(|s| s.to_string()).collect()) + } + }, + Err(error) => { + Output::Values(error.to_string().lines().map(|s| s.to_string()).collect()) } - // Errors can contain multiple lines (say if there are details), and rewrite - // sticks them each on their own line, so we need to split up the lines here to - // each be its own String in the Vec. 
- Err(error) => error.to_string().lines().map(|s| s.to_string()).collect(), - }); + }; if *output != actual { Ok(Outcome::OutputFailure { expected_output: output, @@ -1838,25 +1877,26 @@ impl<'a> RunnerInner<'a> { } } -async fn connect(addr: SocketAddr, user: Option<&str>) -> tokio_postgres::Client { - let (client, connection) = tokio_postgres::connect( - &format!( - "host={} port={} user={}", - addr.ip(), - addr.port(), - user.unwrap_or("materialize") - ), - NoTls, - ) - .await - .unwrap(); +async fn connect( + addr: SocketAddr, + user: Option<&str>, + password: Option<&str>, +) -> Result { + let mut config = tokio_postgres::Config::new(); + config.host(addr.ip().to_string()); + config.port(addr.port()); + config.user(user.unwrap_or("materialize")); + if let Some(password) = password { + config.password(password); + } + let (client, connection) = config.connect(NoTls).await?; task::spawn(|| "sqllogictest_connect", async move { if let Err(e) = connection.await { eprintln!("connection error: {}", e); } }); - client + Ok(client) } pub trait WriteFmt { diff --git a/src/storage-client/src/client.proto b/src/storage-client/src/client.proto index 6d4f73205165d..b8b0099949e19 100644 --- a/src/storage-client/src/client.proto +++ b/src/storage-client/src/client.proto @@ -62,7 +62,7 @@ message ProtoRunSinkCommand { message ProtoStorageCommand { message ProtoCreateTimely { mz_cluster_client.client.ProtoTimelyConfig config = 1; - mz_cluster_client.client.ProtoClusterStartupEpoch epoch = 2; + mz_proto.ProtoU128 nonce = 2; } oneof kind { diff --git a/src/storage-client/src/client.rs b/src/storage-client/src/client.rs index 01f6f1d87ea68..65c01f7cd9175 100644 --- a/src/storage-client/src/client.rs +++ b/src/storage-client/src/client.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use differential_dataflow::difference::Semigroup; use differential_dataflow::lattice::Lattice; use mz_cluster_client::ReplicaId; -use mz_cluster_client::client::{ClusterStartupEpoch, TimelyConfig, 
TryIntoTimelyConfig}; +use mz_cluster_client::client::{TimelyConfig, TryIntoTimelyConfig}; use mz_ore::assert_none; use mz_persist_client::batch::{BatchBuilder, ProtoBatch}; use mz_persist_client::write::WriteHandle; @@ -113,7 +113,7 @@ pub enum StorageCommand { /// we want created, before other commands are sent. CreateTimely { config: Box, - epoch: ClusterStartupEpoch, + nonce: Uuid, }, /// Indicates that the controller has sent all commands reflecting its /// initial state. @@ -298,9 +298,9 @@ impl RustType for StorageCommand { use proto_storage_command::*; ProtoStorageCommand { kind: Some(match self { - StorageCommand::CreateTimely { config, epoch } => CreateTimely(ProtoCreateTimely { + StorageCommand::CreateTimely { config, nonce } => CreateTimely(ProtoCreateTimely { config: Some(*config.into_proto()), - epoch: Some(epoch.into_proto()), + nonce: Some(nonce.into_proto()), }), StorageCommand::InitializationComplete => InitializationComplete(()), StorageCommand::AllowWrites => AllowWrites(()), @@ -327,10 +327,10 @@ impl RustType for StorageCommand { use proto_storage_command::Kind::*; use proto_storage_command::*; match proto.kind { - Some(CreateTimely(ProtoCreateTimely { config, epoch })) => { + Some(CreateTimely(ProtoCreateTimely { config, nonce })) => { let config = Box::new(config.into_rust_if_some("ProtoCreateTimely::config")?); - let epoch = epoch.into_rust_if_some("ProtoCreateTimely::epoch")?; - Ok(StorageCommand::CreateTimely { config, epoch }) + let nonce = nonce.into_rust_if_some("ProtoCreateTimely::nonce")?; + Ok(StorageCommand::CreateTimely { config, nonce }) } Some(InitializationComplete(())) => Ok(StorageCommand::InitializationComplete), Some(AllowWrites(())) => Ok(StorageCommand::AllowWrites), @@ -854,7 +854,7 @@ where self.observe_command(&command); match command { - StorageCommand::CreateTimely { config, epoch } => { + StorageCommand::CreateTimely { config, nonce } => { let timely_cmds = config.split_command(self.parts); let timely_cmds = 
timely_cmds @@ -862,7 +862,7 @@ where .map(|config| { Some(StorageCommand::CreateTimely { config: Box::new(config), - epoch, + nonce, }) }) .collect(); @@ -1080,9 +1080,9 @@ impl RustType for (GlobalId, Antichain) { } impl TryIntoTimelyConfig for StorageCommand { - fn try_into_timely_config(self) -> Result<(TimelyConfig, ClusterStartupEpoch), Self> { + fn try_into_timely_config(self) -> Result<(TimelyConfig, Uuid), Self> { match self { - StorageCommand::CreateTimely { config, epoch } => Ok((*config, epoch)), + StorageCommand::CreateTimely { config, nonce } => Ok((*config, nonce)), cmd => Err(cmd), } } diff --git a/src/storage-controller/src/instance.rs b/src/storage-controller/src/instance.rs index 6bb0786e973d6..30ad7ad4d4eda 100644 --- a/src/storage-controller/src/instance.rs +++ b/src/storage-controller/src/instance.rs @@ -11,7 +11,6 @@ use crate::CollectionMetadata; use std::collections::{BTreeMap, BTreeSet}; -use std::num::NonZeroI64; use std::sync::atomic::AtomicBool; use std::sync::{Arc, atomic}; use std::time::{Duration, Instant}; @@ -21,7 +20,7 @@ use differential_dataflow::lattice::Lattice; use itertools::Itertools; use mz_build_info::BuildInfo; use mz_cluster_client::ReplicaId; -use mz_cluster_client::client::{ClusterReplicaLocation, ClusterStartupEpoch, TimelyConfig}; +use mz_cluster_client::client::{ClusterReplicaLocation, TimelyConfig}; use mz_dyncfg::ConfigSet; use mz_ore::cast::CastFrom; use mz_ore::now::NowFn; @@ -43,6 +42,7 @@ use timely::progress::{Antichain, Timestamp}; use tokio::select; use tokio::sync::mpsc; use tracing::{debug, info, warn}; +use uuid::Uuid; use crate::history::CommandHistory; @@ -80,11 +80,6 @@ pub(crate) struct Instance { /// The command history, used to replay past commands when introducing new replicas or /// reconnecting to existing replicas. history: CommandHistory, - /// The current cluster startup epoch. 
- /// - /// The `replica` value of the epoch is increased every time a replica is (re)connected, - /// allowing the distinction of different replica incarnations. - epoch: ClusterStartupEpoch, /// Metrics tracked for this storage instance. metrics: InstanceMetrics, /// A function that returns the current time. @@ -117,7 +112,6 @@ where /// Creates a new [`Instance`]. pub fn new( workload_class: Option, - envd_epoch: NonZeroI64, metrics: InstanceMetrics, dyncfg: Arc, now: NowFn, @@ -125,7 +119,6 @@ where ) -> Self { let enable_snapshot_frontier = STORAGE_SINK_SNAPSHOT_FRONTIER.handle(&dyncfg); let history = CommandHistory::new(metrics.for_history(), enable_snapshot_frontier); - let epoch = ClusterStartupEpoch::new(envd_epoch, 0); let mut instance = Self { workload_class, @@ -134,7 +127,6 @@ where ingestion_exports: Default::default(), active_exports: BTreeMap::new(), history, - epoch, metrics, now, response_tx: instance_response_tx, @@ -142,7 +134,7 @@ where instance.send(StorageCommand::CreateTimely { config: Default::default(), - epoch, + nonce: Default::default(), }); instance @@ -159,9 +151,8 @@ where // enable the `objects_installed` assert below. self.history.reduce(); - self.epoch.bump_replica(); let metrics = self.metrics.for_replica(id); - let replica = Replica::new(id, config, self.epoch, metrics, self.response_tx.clone()); + let replica = Replica::new(id, config, metrics, self.response_tx.clone()); self.replicas.insert(id, replica); @@ -780,7 +771,6 @@ where fn new( id: ReplicaId, config: ReplicaConfig, - epoch: ClusterStartupEpoch, metrics: ReplicaMetrics, response_tx: mpsc::UnboundedSender<(Option, StorageResponse)>, ) -> Self { @@ -792,7 +782,6 @@ where ReplicaTask { replica_id: id, config: config.clone(), - epoch, metrics: metrics.clone(), connected: Arc::clone(&connected), command_rx, @@ -835,8 +824,6 @@ struct ReplicaTask { replica_id: ReplicaId, /// Replica configuration. 
config: ReplicaConfig, - /// The epoch identifying this incarnation of the replica. - epoch: ClusterStartupEpoch, /// Replica metrics. metrics: ReplicaMetrics, /// Flag to report successful replica connection. @@ -955,7 +942,7 @@ where /// Most [`StorageCommand`]s are independent of the target replica, but some contain /// replica-specific fields that must be adjusted before sending. fn specialize_command(&self, command: &mut StorageCommand) { - if let StorageCommand::CreateTimely { config, epoch } = command { + if let StorageCommand::CreateTimely { config, nonce } = command { **config = TimelyConfig { workers: self.config.location.workers, // Overridden by the storage `PartitionedState` implementation. @@ -972,7 +959,7 @@ where // No limit; zero-copy is disabled. zero_copy_limit: None, }; - *epoch = self.epoch; + *nonce = Uuid::new_v4(); } } } diff --git a/src/storage-controller/src/lib.rs b/src/storage-controller/src/lib.rs index 011d9bcc6951d..fb54497c8a47f 100644 --- a/src/storage-controller/src/lib.rs +++ b/src/storage-controller/src/lib.rs @@ -12,7 +12,6 @@ use std::any::Any; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::{Debug, Display}; -use std::num::NonZeroI64; use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -137,8 +136,6 @@ pub struct Controller + Tim build_info: &'static BuildInfo, /// A function that returns the current time. now: NowFn, - /// The fencing token for this instance of the controller. - envd_epoch: NonZeroI64, /// Whether or not this controller is in read-only mode. 
/// @@ -511,7 +508,6 @@ where let metrics = self.metrics.for_instance(id); let mut instance = Instance::new( workload_class, - self.envd_epoch, metrics, Arc::clone(self.config().config_set()), self.now.clone(), @@ -2637,7 +2633,6 @@ where now: NowFn, wallclock_lag: WallclockLagFn, txns_metrics: Arc, - envd_epoch: NonZeroI64, read_only: bool, metrics_registry: &MetricsRegistry, controller_metrics: ControllerMetrics, @@ -2725,7 +2720,6 @@ where introspection_ids, introspection_tokens, now, - envd_epoch, read_only, source_statistics: Arc::new(Mutex::new(statistics::SourceStatistics { source_statistics: BTreeMap::new(), diff --git a/src/storage-types/Cargo.toml b/src/storage-types/Cargo.toml index eb0dd3142edbc..26b46d21fbcb6 100644 --- a/src/storage-types/Cargo.toml +++ b/src/storage-types/Cargo.toml @@ -66,7 +66,7 @@ regex = "1.11.1" serde = { version = "1.0.219", features = ["derive"] } serde_json = { version = "1.0.127", features = ["preserve_order"] } thiserror = "2.0.12" -tiberius = { version = "0.12", features = ["sql-browser-tokio", "tds73"], default-features = false } +tiberius = { version = "0.12", features = ["sql-browser-tokio", "tds73", "native-tls"], default-features = false } timely = "0.21.0" tokio = { version = "1.44.1", features = ["fs", "rt", "sync", "test-util", "time"] } tokio-postgres = { version = "0.7.8", features = ["serde"] } diff --git a/src/storage-types/src/connections.proto b/src/storage-types/src/connections.proto index 4d138162ae1ce..6934b7f49898e 100644 --- a/src/storage-types/src/connections.proto +++ b/src/storage-types/src/connections.proto @@ -142,6 +142,8 @@ message ProtoSqlServerConnectionDetails { mz_repr.catalog_item_id.ProtoCatalogItemId password = 5; ProtoTunnel tunnel = 6; ProtoSqlServerEncryptionLevel encryption = 7; + ProtoSqlServerCertificateValidationPolicy certificate_validation_policy = 8; + string_or_secret.ProtoStringOrSecret tls_root_cert = 9; } enum ProtoSqlServerEncryptionLevel { @@ -150,3 +152,9 @@ enum 
ProtoSqlServerEncryptionLevel { SQL_SERVER_PREFERRED = 2; SQL_SERVER_REQUIRED = 3; } + +enum ProtoSqlServerCertificateValidationPolicy { + SQL_SERVER_TRUST_ALL = 0; + SQL_SERVER_VERIFY_SYSTEM = 1; + SQL_SERVER_VERIFY_CA = 2; +} diff --git a/src/storage-types/src/connections.rs b/src/storage-types/src/connections.rs index 8d572e8d4987f..912ecc125f9bf 100644 --- a/src/storage-types/src/connections.rs +++ b/src/storage-types/src/connections.rs @@ -14,7 +14,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::net::SocketAddr; use std::sync::Arc; -use anyhow::{Context, anyhow, bail}; +use anyhow::{Context, anyhow}; use itertools::Itertools; use mz_ccsr::tls::{Certificate, Identity}; use mz_cloud_resources::{AwsExternalIdPrefix, CloudResourceReader, vpc_endpoint_host}; @@ -1534,18 +1534,12 @@ impl PostgresConnection { InTask::No, ) .await?; - let client = config + config .connect( "connection validation", &storage_configuration.connection_context.ssh_tunnel_manager, ) .await?; - use PostgresFlavor::*; - match (client.server_flavor(), &self.flavor) { - (Vanilla, Yugabyte) => bail!("Expected to find PostgreSQL server, found Yugabyte."), - (Yugabyte, Vanilla) => bail!("Expected to find Yugabyte server, found PostgreSQL."), - (Vanilla, Vanilla) | (Yugabyte, Yugabyte) => {} - } Ok(()) } } @@ -2048,6 +2042,10 @@ pub struct SqlServerConnectionDetails { pub tunnel: Tunnel, /// Level of encryption to use for the connection. pub encryption: mz_sql_server_util::config::EncryptionLevel, + /// Certificate validation policy + pub certificate_validation_policy: mz_sql_server_util::config::CertificateValidationPolicy, + /// TLS CA Certificate in PEM format + pub tls_root_cert: Option, } impl SqlServerConnectionDetails { @@ -2100,8 +2098,24 @@ impl SqlServerConnectionDetails { inner_config.host(&self.host); inner_config.port(self.port); inner_config.database(self.database.clone()); - // TODO(sql_server1): Figure out the right settings for encryption. 
- // inner_config.encryption(self.encryption.into()); + inner_config.encryption(self.encryption.into()); + match self.certificate_validation_policy { + mz_sql_server_util::config::CertificateValidationPolicy::TrustAll => { + inner_config.trust_cert() + } + mz_sql_server_util::config::CertificateValidationPolicy::VerifyCA => { + inner_config.trust_cert_ca_pem( + self.tls_root_cert + .as_ref() + .unwrap() + .get_string(in_task, secrets_reader) + .await + .context("ca certificate")?, + ); + } + mz_sql_server_util::config::CertificateValidationPolicy::VerifySystem => (), // no-op + } + inner_config.application_name("materialize"); // Read our auth settings from @@ -2118,12 +2132,6 @@ impl SqlServerConnectionDetails { // username and password. inner_config.authentication(tiberius::AuthMethod::sql_server(user, password)); - // TODO(sql_server2): Fork the tiberius library and add support for - // specifying a cert bundle from a binary blob. - // - // See: - inner_config.trust_cert(); - // Prevent users from probing our internal network ports by trying to // connect to localhost, or another non-external IP. 
let enfoce_external_addresses = ENFORCE_EXTERNAL_ADDRESSES.get(dyncfg); @@ -2190,6 +2198,8 @@ impl IntoInlineConnection password, tunnel, encryption, + certificate_validation_policy, + tls_root_cert, } = self; SqlServerConnectionDetails { @@ -2200,6 +2210,8 @@ impl IntoInlineConnection password, tunnel: tunnel.into_inline_connection(&r), encryption, + certificate_validation_policy, + tls_root_cert, } } } @@ -2219,6 +2231,8 @@ impl AlterCompatible for SqlServerConnectionDetails { user: _, password: _, encryption: _, + certificate_validation_policy: _, + tls_root_cert: _, } = self; let compatibility_checks = [(tunnel.alter_compatible(id, &other.tunnel).is_ok(), "tunnel")]; @@ -2248,6 +2262,8 @@ impl RustType for SqlServerConnectionDetails { password: Some(self.password.into_proto()), tunnel: Some(self.tunnel.into_proto()), encryption: self.encryption.into_proto().into(), + certificate_validation_policy: self.certificate_validation_policy.into_proto().into(), + tls_root_cert: self.tls_root_cert.into_proto(), } } @@ -2266,6 +2282,11 @@ impl RustType for SqlServerConnectionDetails { .tunnel .into_rust_if_some("ProtoSqlServerConnectionDetails::tunnel")?, encryption: ProtoSqlServerEncryptionLevel::try_from(proto.encryption)?.into_rust()?, + certificate_validation_policy: ProtoSqlServerCertificateValidationPolicy::try_from( + proto.certificate_validation_policy, + )? 
+ .into_rust()?, + tls_root_cert: proto.tls_root_cert.into_rust()?, }) } } @@ -2298,6 +2319,40 @@ impl RustType for mz_sql_server_util::config::Enc } } +impl RustType + for mz_sql_server_util::config::CertificateValidationPolicy +{ + fn into_proto(&self) -> ProtoSqlServerCertificateValidationPolicy { + match self { + mz_sql_server_util::config::CertificateValidationPolicy::TrustAll => { + ProtoSqlServerCertificateValidationPolicy::SqlServerTrustAll + } + mz_sql_server_util::config::CertificateValidationPolicy::VerifySystem => { + ProtoSqlServerCertificateValidationPolicy::SqlServerVerifySystem + } + mz_sql_server_util::config::CertificateValidationPolicy::VerifyCA => { + ProtoSqlServerCertificateValidationPolicy::SqlServerVerifyCa + } + } + } + + fn from_proto( + proto: ProtoSqlServerCertificateValidationPolicy, + ) -> Result { + Ok(match proto { + ProtoSqlServerCertificateValidationPolicy::SqlServerTrustAll => { + mz_sql_server_util::config::CertificateValidationPolicy::TrustAll + } + ProtoSqlServerCertificateValidationPolicy::SqlServerVerifySystem => { + mz_sql_server_util::config::CertificateValidationPolicy::VerifySystem + } + ProtoSqlServerCertificateValidationPolicy::SqlServerVerifyCa => { + mz_sql_server_util::config::CertificateValidationPolicy::VerifyCA + } + }) + } +} + /// A connection to an SSH tunnel. 
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] pub struct SshConnection { diff --git a/src/storage/src/server.rs b/src/storage/src/server.rs index 04c62571e02ba..b244c1d102fbc 100644 --- a/src/storage/src/server.rs +++ b/src/storage/src/server.rs @@ -24,6 +24,7 @@ use mz_txn_wal::operator::TxnsContext; use timely::communication::Allocate; use timely::worker::Worker as TimelyWorker; use tokio::sync::mpsc; +use uuid::Uuid; use crate::metrics::StorageMetrics; use crate::storage_state::{StorageInstanceContext, Worker}; @@ -107,6 +108,7 @@ impl ClusterSpec for Config { &self, timely_worker: &mut TimelyWorker, client_rx: crossbeam_channel::Receiver<( + Uuid, crossbeam_channel::Receiver, mpsc::UnboundedSender, )>, diff --git a/src/storage/src/source/postgres/replication.rs b/src/storage/src/source/postgres/replication.rs index eeb8bf178bbdf..ec465b2beee15 100644 --- a/src/storage/src/source/postgres/replication.rs +++ b/src/storage/src/source/postgres/replication.rs @@ -246,8 +246,9 @@ pub(crate) fn render>( while let Some(_) = slot_ready_input.next().await { // Wait for the slot to be created } - tracing::info!(%id, "ensuring replication slot {slot} exists"); - super::ensure_replication_slot(&replication_client, slot).await?; + + // The slot is always created by the snapshot operator. If the slot doesn't exist, + // when this check runs, this operator will return an error. let slot_metadata = super::fetch_slot_metadata( &*metadata_client, slot, @@ -780,6 +781,9 @@ async fn raw_stream<'a>( // Ensure we don't pre-drop the task let _max_lsn_task_handle = max_lsn_task_handle; + // ensure we don't drop the replication client! 
+ let _replication_client = replication_client; + let mut uppers = pin!(uppers); let mut last_committed_upper = resume_lsn; diff --git a/src/storage/src/source/postgres/snapshot.rs b/src/storage/src/source/postgres/snapshot.rs index 8f1c42d77d066..eab65d3b50379 100644 --- a/src/storage/src/source/postgres/snapshot.rs +++ b/src/storage/src/source/postgres/snapshot.rs @@ -144,7 +144,6 @@ use differential_dataflow::AsCollection; use futures::{StreamExt as _, TryStreamExt}; use mz_ore::cast::CastFrom; use mz_ore::future::InTask; -use mz_postgres_util::tunnel::PostgresFlavor; use mz_postgres_util::{Client, PostgresError, simple_query_opt}; use mz_repr::{Datum, DatumVec, Diff, Row}; use mz_sql_parser::ast::{Ident, display::AstDisplay}; @@ -261,14 +260,7 @@ pub(crate) fn render>( reader_table_info.len() ); - // Nothing needs to be snapshot. - if all_outputs.is_empty() { - trace!(%id, "no exports to snapshot"); - // Note we do not emit a `ProgressStatisticsUpdate::Snapshot` update here, - // as we do not want to attempt to override the current value with 0. We - // just leave it null. - return Ok(()); - } + let connection_config = connection .connection @@ -278,52 +270,58 @@ pub(crate) fn render>( InTask::Yes, ) .await?; - let task_name = format!("timely-{worker_id} PG snapshotter"); - let client = if is_snapshot_leader { + + // The snapshot operator is responsible for creating the replication slot(s). + // This first slot is the permanent slot that will be used for reading the replication + // stream. A temporary slot is created further on to capture table snapshots. + let replication_client = if is_snapshot_leader { let client = connection_config .connect_replication(&config.config.connection_context.ssh_tunnel_manager) .await?; - - // Attempt to export the snapshot by creating the main replication slot. If that - // succeeds then there is no need for creating additional temporary slots. 
let main_slot = &connection.publication_details.slot; - let snapshot_info = match export_snapshot(&client, main_slot, false).await { - Ok(info) => info, - Err(err @ TransientError::ReplicationSlotAlreadyExists) => { - match connection.connection.flavor { - // If we're connecting to a vanilla we have the option of exporting a - // snapshot via a temporary slot - PostgresFlavor::Vanilla => { - let tmp_slot = format!( - "mzsnapshot_{}", - uuid::Uuid::new_v4()).replace('-', "" - ); - export_snapshot(&client, &tmp_slot, true).await? - } - // No salvation for Yugabyte - PostgresFlavor::Yugabyte => return Err(err), - } - } - Err(err) => return Err(err), - }; - trace!( - %id, - "timely-{worker_id} exporting snapshot info {snapshot_info:?}"); - snapshot_handle.give(&snapshot_cap_set[0], snapshot_info); - client + tracing::info!(%id, "ensuring replication slot {main_slot} exists"); + super::ensure_replication_slot(&client, main_slot).await?; + Some(client) } else { - // Only the snapshot leader needs a replication connection. - connection_config - .connect( - &task_name, - &config.config.connection_context.ssh_tunnel_manager, - ) - .await? + None }; *slot_ready_cap_set = CapabilitySet::new(); + // Nothing needs to be snapshot. + if all_outputs.is_empty() { + trace!(%id, "no exports to snapshot"); + // Note we do not emit a `ProgressStatisticsUpdate::Snapshot` update here, + // as we do not want to attempt to override the current value with 0. We + // just leave it null. 
+ return Ok(()); + } + + // replication client is only set if this worker is the snapshot leader + let client = match replication_client { + Some(client) => { + let tmp_slot = format!("mzsnapshot_{}", uuid::Uuid::new_v4()).replace('-', ""); + let snapshot_info = export_snapshot(&client, &tmp_slot, true).await?; + trace!( + %id, + "timely-{worker_id} exporting snapshot info {snapshot_info:?}"); + snapshot_handle.give(&snapshot_cap_set[0], snapshot_info); + + client + } + None => { + // Only the snapshot leader needs a replication connection. + let task_name = format!("timely-{worker_id} PG snapshotter"); + connection_config + .connect( + &task_name, + &config.config.connection_context.ssh_tunnel_manager, + ) + .await? + } + }; + // Configure statement_timeout based on param. We want to be able to // override the server value here in case it's set too low, // respective to the size of the data we need to copy. diff --git a/src/storage/src/storage_state.rs b/src/storage/src/storage_state.rs index bfc9c4c9d7a94..d48e301020126 100644 --- a/src/storage/src/storage_state.rs +++ b/src/storage/src/storage_state.rs @@ -138,7 +138,7 @@ pub struct Worker<'w, A: Allocate> { pub timely_worker: &'w mut TimelyWorker, /// The channel over which communication handles for newly connected clients /// are delivered. - pub client_rx: crossbeam_channel::Receiver<(CommandReceiver, ResponseSender)>, + pub client_rx: crossbeam_channel::Receiver<(Uuid, CommandReceiver, ResponseSender)>, /// The state associated with collection ingress and egress. pub storage_state: StorageState, } @@ -147,7 +147,7 @@ impl<'w, A: Allocate> Worker<'w, A> { /// Creates new `Worker` state from the given components. 
pub fn new( timely_worker: &'w mut TimelyWorker, - client_rx: crossbeam_channel::Receiver<(CommandReceiver, ResponseSender)>, + client_rx: crossbeam_channel::Receiver<(Uuid, CommandReceiver, ResponseSender)>, metrics: StorageMetrics, now: NowFn, connection_context: ConnectionContext, @@ -418,7 +418,7 @@ impl StorageInstanceContext { impl<'w, A: Allocate> Worker<'w, A> { /// Waits for client connections and runs them to completion. pub fn run(&mut self) { - while let Ok((rx, tx)) = self.client_rx.recv() { + while let Ok((_nonce, rx, tx)) = self.client_rx.recv() { self.run_client(rx, tx); } } diff --git a/src/storage/src/upsert.rs b/src/storage/src/upsert.rs index d00adc4b858cf..24b7acb5f56f2 100644 --- a/src/storage/src/upsert.rs +++ b/src/storage/src/upsert.rs @@ -55,15 +55,25 @@ use types::{ mod autospill; pub mod memory; -mod rocksdb; +pub(crate) mod rocksdb; // TODO(aljoscha): Move next to upsert module, rename to upsert_types. pub(crate) mod types; pub type UpsertValue = Result; -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub struct UpsertKey([u8; 32]); +impl Debug for UpsertKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "0x")?; + for byte in self.0 { + write!(f, "{:02x}", byte)?; + } + Ok(()) + } +} + impl AsRef<[u8]> for UpsertKey { #[inline(always)] // Note we do 1 `multi_get` and 1 `multi_put` while processing a _batch of updates_. 
Within the @@ -288,24 +298,16 @@ where // A closure that will initialize and return a configured RocksDB instance let rocksdb_init_fn = move || async move { - let merge_operator = - if rocksdb_use_native_merge_operator { - Some(( + let merge_operator = if rocksdb_use_native_merge_operator { + Some(( "upsert_state_snapshot_merge_v1".to_string(), - |a: &[u8], - b: ValueIterator< - BincodeOpts, - StateValue>, - >| { - consolidating_merge_function::>( - a.into(), - b, - ) + |a: &[u8], b: ValueIterator>| { + consolidating_merge_function::(a.into(), b) }, )) - } else { - None - }; + } else { + None + }; rocksdb::RocksDB::new( mz_rocksdb::RocksDBInstance::new( &rocksdb_dir, @@ -410,7 +412,7 @@ where G::Timestamp: Refines + TotalOrder + Sync, F: FnOnce() -> Fut + 'static, Fut: std::future::Future, - US: UpsertStateBackend>, + US: UpsertStateBackend, FromTime: Debug + timely::ExchangeData + Ord + Sync, { // Hard-coded to true because classic UPSERT cannot be used safely with @@ -552,18 +554,15 @@ enum DrainStyle<'a, T> { /// from the input timely edge. async fn drain_staged_input( stash: &mut Vec<(T, UpsertKey, Reverse, Option)>, - commands_state: &mut indexmap::IndexMap< - UpsertKey, - types::UpsertValueAndSize>, - >, + commands_state: &mut indexmap::IndexMap>, output_updates: &mut Vec<(Result, T, Diff)>, multi_get_scratch: &mut Vec, drain_style: DrainStyle<'_, T>, error_emitter: &mut E, - state: &mut UpsertState<'_, S, T, Option>, + state: &mut UpsertState<'_, S, T, FromTime>, source_config: &crate::source::SourceExportCreationConfig, ) where - S: UpsertStateBackend>, + S: UpsertStateBackend, G: Scope, T: PartialOrder + Ord + Clone + Send + Sync + Serialize + Debug + 'static, FromTime: timely::ExchangeData + Ord + Sync, @@ -639,7 +638,9 @@ async fn drain_staged_input( // Skip this command if its order key is below the one in the upsert state. 
// Note that the existing order key may be `None` if the existing value // is from snapshotting, which always sorts below new values/deletes. - let existing_order = existing_value.as_ref().and_then(|cs| cs.order().as_ref()); + let existing_order = existing_value + .as_ref() + .and_then(|cs| cs.provisional_order(&ts)); if existing_order >= Some(&from_time.0) { // Skip this update. If no later updates adjust this key, then we just // end up writing the same value back to state. If there @@ -650,11 +651,10 @@ async fn drain_staged_input( match value { Some(value) => { - if let Some(old_value) = existing_value.replace(StateValue::finalized_value( - value.clone(), - Some(from_time.0.clone()), - )) { - if let Value::FinalizedValue(old_value, _) = old_value.into_decoded() { + if let Some(old_value) = + existing_value.replace(StateValue::finalized_value(value.clone())) + { + if let Value::FinalizedValue(old_value) = old_value.into_decoded() { output_updates.push((old_value, ts.clone(), Diff::MINUS_ONE)); } } @@ -662,13 +662,13 @@ async fn drain_staged_input( } None => { if let Some(old_value) = existing_value.take() { - if let Value::FinalizedValue(old_value, _) = old_value.into_decoded() { + if let Value::FinalizedValue(old_value) = old_value.into_decoded() { output_updates.push((old_value, ts, Diff::MINUS_ONE)); } } // Record a tombstone for deletes. 
- *existing_value = Some(StateValue::tombstone(Some(from_time.0.clone()))); + *existing_value = Some(StateValue::tombstone()); } } } @@ -728,7 +728,7 @@ where G::Timestamp: TotalOrder + Sync, F: FnOnce() -> Fut + 'static, Fut: std::future::Future, - US: UpsertStateBackend>, + US: UpsertStateBackend, FromTime: timely::ExchangeData + Ord + Sync, { let mut builder = AsyncOperatorBuilder::new("Upsert".to_string(), input.scope()); @@ -769,10 +769,7 @@ where let shutdown_button = builder.build(move |caps| async move { let [mut output_cap, mut snapshot_cap, health_cap]: [_; 3] = caps.try_into().unwrap(); - // The order key of the `UpsertState` is `Option`, which implements `Default` - // (as required for `consolidate_chunk`), with slightly more efficient serialization - // than a default `Partitioned`. - let mut state = UpsertState::<_, _, Option>::new( + let mut state = UpsertState::<_, _, FromTime>::new( state().await, upsert_shared_metrics, &upsert_metrics, @@ -869,7 +866,7 @@ where // and have a consistent iteration order. let mut commands_state: indexmap::IndexMap< _, - types::UpsertValueAndSize>, + types::UpsertValueAndSize, > = indexmap::IndexMap::new(); let mut multi_get_scratch = Vec::new(); diff --git a/src/storage/src/upsert/types.rs b/src/storage/src/upsert/types.rs index 35a19cab8576f..335812ce887c3 100644 --- a/src/storage/src/upsert/types.rs +++ b/src/storage/src/upsert/types.rs @@ -198,7 +198,7 @@ pub enum StateValue { impl std::fmt::Debug for StateValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - StateValue::Consolidating(_) => write!(f, "Consolidating"), + StateValue::Consolidating(c) => std::fmt::Display::fmt(c, f), StateValue::Value(_) => write!(f, "Value"), } } @@ -221,13 +221,13 @@ impl std::fmt::Debug for StateValue { /// recorded. 
#[derive(Clone, serde::Serialize, serde::Deserialize, Debug)] pub enum Value { - FinalizedValue(UpsertValue, O), - Tombstone(O), + FinalizedValue(UpsertValue), + Tombstone, ProvisionalValue { // We keep the finalized value around, because the provisional value is // only valid when processing updates at the same timestamp. And at any // point we might still require access to the finalized value. - finalized_value: Option>, + finalized_value: Option, // A provisional value of `None` is a provisional tombstone. // // WIP: We can also box this, to keep the size of StateValue as it was @@ -259,36 +259,24 @@ impl fmt::Display for Consolidating { impl StateValue { /// A finalized, that is (assumed) persistent, value occurring at some order /// key. - pub fn finalized_value(value: UpsertValue, order: O) -> Self { - Self::Value(Value::FinalizedValue(value, order)) + pub fn finalized_value(value: UpsertValue) -> Self { + Self::Value(Value::FinalizedValue(value)) } #[allow(unused)] /// A tombstoned value occurring at some order key. - pub fn tombstone(order: O) -> Self { - Self::Value(Value::Tombstone(order)) + pub fn tombstone() -> Self { + Self::Value(Value::Tombstone) } /// Whether the value is a tombstone. pub fn is_tombstone(&self) -> bool { match self { - Self::Value(Value::Tombstone(_)) => true, + Self::Value(Value::Tombstone) => true, _ => false, } } - /// Pull out the order for the given `Value`, assuming `ensure_decoded` has been called. - pub fn order(&self) -> &O { - match self { - Self::Value(Value::FinalizedValue(_, order)) => order, - Self::Value(Value::ProvisionalValue { .. }) => { - panic!("order() called on provisional value") - } - Self::Value(Value::Tombstone(order)) => order, - _ => panic!("called `order` without calling `ensure_decoded`"), - } - } - /// Pull out the `Value` value for a `StateValue`, after `ensure_decoded` has been called. 
pub fn into_decoded(self) -> Value { match self { @@ -321,7 +309,7 @@ impl StateValue { provisional_value, }) => { finalized_value.as_ref().map(|v| match v.as_ref() { - (Ok(row), _order) => + Ok(row) => // The finalized value is boxed, so the size of Row is // not included in the outer size of Self. We therefore // don't subtract it here like for the other branches. @@ -330,7 +318,7 @@ impl StateValue { // the box. + u64::cast_from(std::mem::size_of::()), // Assume errors are rare enough to not move the needle. - (Err(_), _order) => 0, + Err(_) => 0, }).unwrap_or(0) + provisional_value.0.as_ref().map(|v| match v{ @@ -347,7 +335,7 @@ impl StateValue { // object). + u64::cast_from(std::mem::size_of::()) } - Self::Value(Value::Tombstone(_)) => { + Self::Value(Value::Tombstone) => { // This assumes the size of any `O` instantiation is meaningful (i.e. not a heap // object). u64::cast_from(std::mem::size_of::()) @@ -386,13 +374,13 @@ impl StateValue { provisional_order: O, ) -> Self { match self { - StateValue::Value(Value::FinalizedValue(value, order)) => { + StateValue::Value(Value::FinalizedValue(value)) => { StateValue::Value(Value::ProvisionalValue { - finalized_value: Some(Box::new((value, order))), + finalized_value: Some(value), provisional_value: (Some(provisional_value), provisional_ts, provisional_order), }) } - StateValue::Value(Value::Tombstone(_)) => StateValue::Value(Value::ProvisionalValue { + StateValue::Value(Value::Tombstone) => StateValue::Value(Value::ProvisionalValue { finalized_value: None, provisional_value: (Some(provisional_value), provisional_ts, provisional_order), }), @@ -426,18 +414,16 @@ impl StateValue { /// timestamp. 
pub fn into_provisional_tombstone(self, provisional_ts: T, provisional_order: O) -> Self { match self { - StateValue::Value(Value::FinalizedValue(value, order)) => { + StateValue::Value(Value::FinalizedValue(value)) => { StateValue::Value(Value::ProvisionalValue { - finalized_value: Some(Box::new((value, order))), + finalized_value: Some(value), provisional_value: (None, provisional_ts, provisional_order), }) } - StateValue::Value(Value::Tombstone(order)) => { - StateValue::Value(Value::ProvisionalValue { - finalized_value: None, - provisional_value: (None, provisional_ts, order), - }) - } + StateValue::Value(Value::Tombstone) => StateValue::Value(Value::ProvisionalValue { + finalized_value: None, + provisional_value: (None, provisional_ts, provisional_order), + }), StateValue::Value(Value::ProvisionalValue { finalized_value, provisional_value: _, @@ -451,70 +437,28 @@ impl StateValue { } } - /// Returns the order of a provisional value at the given timestamp. If that - /// doesn't exist, the order of the finalized value. - /// - /// Returns `None` if none of the above exist. + /// Returns the order of a provisional value at the given timestamp if it exists. pub fn provisional_order(&self, ts: &T) -> Option<&O> { match self { - Self::Value(Value::FinalizedValue(_, order)) => Some(order), - Self::Value(Value::Tombstone(order)) => Some(order), + Self::Value(Value::FinalizedValue(_)) => None, + Self::Value(Value::Tombstone) => None, Self::Value(Value::ProvisionalValue { finalized_value: _, provisional_value: (_, provisional_ts, provisional_order), }) if provisional_ts == ts => Some(provisional_order), - Self::Value(Value::ProvisionalValue { - finalized_value, - provisional_value: _, - }) => finalized_value.as_ref().map(|v| &v.1), + Self::Value(Value::ProvisionalValue { .. }) => None, Self::Consolidating(_) => { panic!("called `provisional_order` without calling `ensure_decoded`") } } } - // WIP: We don't need these after all, but leave in for review. 
- ///// Returns the order of this value, if a finalized value is present. - //pub fn finalized_order(&self) -> Option<&O> { - // match self { - // Self::Value(Value::FinalizedValue(_, order)) => Some(order), - // Self::Value(Value::Tombstone(order)) => Some(order), - // Self::Value(Value::ProvisionalValue { - // finalized_value, - // provisional_value: _, - // }) => finalized_value.as_ref().map(|v| &v.1), - // Self::Consolidating(_) => { - // panic!("called `finalized_order` without calling `ensure_decoded`") - // } - // } - //} - - ///// Returns the provisional value, if one is present at the given timestamp. - ///// Falls back to the finalized value, or `None` if there is neither. - //pub fn into_provisional_value(self, ts: &T) -> Option<(UpsertValue, O)> { - // match self { - // Self::Value(Value::FinalizedValue(value, order)) => Some((value, order)), - // Self::Value(Value::Tombstone(_)) => None, - // Self::Value(Value::ProvisionalValue { - // finalized_value: _, - // provisional_value: (provisional_value, provisional_ts, provisional_order), - // }) if provisional_ts == *ts => provisional_value.map(|v| (v, provisional_order)), - // Self::Value(Value::ProvisionalValue { - // finalized_value, - // provisional_value: _, - // }) => finalized_value.map(|boxed| *boxed), - // Self::Consolidating(_) => { - // panic!("called `into_provisional_value` without calling `ensure_decoded`") - // } - // } - //} - /// Returns the provisional value, if one is present at the given timestamp. /// Falls back to the finalized value, or `None` if there is neither. 
pub fn provisional_value_ref(&self, ts: &T) -> Option<&UpsertValue> { match self { - Self::Value(Value::FinalizedValue(value, _order)) => Some(value), - Self::Value(Value::Tombstone(_)) => None, + Self::Value(Value::FinalizedValue(value)) => Some(value), + Self::Value(Value::Tombstone) => None, Self::Value(Value::ProvisionalValue { finalized_value: _, provisional_value: (provisional_value, provisional_ts, _provisional_order), @@ -522,7 +466,7 @@ impl StateValue { Self::Value(Value::ProvisionalValue { finalized_value, provisional_value: _, - }) => finalized_value.as_ref().map(|boxed| &boxed.0), + }) => finalized_value.as_ref(), Self::Consolidating(_) => { panic!("called `provisional_value_ref` without calling `ensure_decoded`") } @@ -530,20 +474,20 @@ impl StateValue { } /// Returns the the finalized value, if one is present. - pub fn into_finalized_value(self) -> Option<(UpsertValue, O)> { + pub fn into_finalized_value(self) -> Option { match self { - Self::Value(Value::FinalizedValue(value, order)) => Some((value, order)), - Self::Value(Value::Tombstone(_order)) => None, + Self::Value(Value::FinalizedValue(value)) => Some(value), + Self::Value(Value::Tombstone) => None, Self::Value(Value::ProvisionalValue { finalized_value, provisional_value: _, - }) => finalized_value.map(|boxed| *boxed), + }) => finalized_value, _ => panic!("called `order` without calling `ensure_decoded`"), } } } -impl StateValue { +impl StateValue { /// We use a XOR trick in order to accumulate the values without having to store the full /// unconsolidated history in memory. For all (value, diff) updates of a key we track: /// - diff_sum = SUM(diff) @@ -631,7 +575,7 @@ impl StateValue { let this = std::mem::take(self); let finalized_value = this.into_finalized_value(); - if let Some((finalized_value, _order)) = finalized_value { + if let Some(finalized_value) = finalized_value { // If we had a value before, merge it into the // now-consolidating state first. 
let _ = @@ -720,7 +664,6 @@ impl StateValue { ); *self = Self::Value(Value::FinalizedValue( bincode_opts.deserialize(value).unwrap(), - Default::default(), )); } 0 => { @@ -746,7 +689,7 @@ impl StateValue { consolidating, source_id, ); - *self = Self::Value(Value::Tombstone(Default::default())); + *self = Self::Value(Value::Tombstone); } other => panic!( "invalid upsert state: non 0/1 diff_sum: {}, state: {}, {}", @@ -1009,14 +952,10 @@ where /// - An iterator over any current value and merge operands queued for the key. /// /// The function should return the new value for the key after merging all the updates. -pub(crate) fn consolidating_merge_function( +pub(crate) fn consolidating_merge_function( _key: UpsertKey, updates: impl Iterator>, -) -> StateValue -where - O: Default, - T: std::cmp::Eq, -{ +) -> StateValue { let mut current: StateValue = Default::default(); let mut bincode_buf = Vec::new(); @@ -1028,7 +967,7 @@ where StateValue::Value(_) => { // This branch is more expensive, but we hopefully rarely hit // it. - if let Some((finalized_value, _order)) = update.into_finalized_value() { + if let Some(finalized_value) = update.into_finalized_value() { let mut update = StateValue::default(); update.merge_update( finalized_value, @@ -1106,7 +1045,7 @@ impl UpsertState<'_, S, T, O> where S: UpsertStateBackend, T: Eq + Clone + Send + Sync + Serialize + 'static, - O: Default + Clone + Send + Sync + Serialize + DeserializeOwned + 'static, + O: Clone + Send + Sync + Serialize + DeserializeOwned + 'static, { /// Consolidate the following differential updates into the state. Updates /// provided to this method can be assumed to consolidate into a single @@ -1480,10 +1419,9 @@ mod tests { // has a direct impact on memory usage of in-memory UPSERT sources. 
#[mz_ore::test] fn test_memory_size() { - let finalized_value: StateValue<(), ()> = - StateValue::finalized_value(Ok(Row::default()), ()); + let finalized_value: StateValue<(), ()> = StateValue::finalized_value(Ok(Row::default())); assert!( - finalized_value.memory_size() <= 88, + finalized_value.memory_size() <= 144, "memory size is {}", finalized_value.memory_size(), ); @@ -1491,7 +1429,7 @@ mod tests { let provisional_value_with_finalized_value: StateValue<(), ()> = finalized_value.into_provisional_value(Ok(Row::default()), (), ()); assert!( - provisional_value_with_finalized_value.memory_size() <= 112, + provisional_value_with_finalized_value.memory_size() <= 168, "memory size is {}", provisional_value_with_finalized_value.memory_size(), ); @@ -1499,7 +1437,7 @@ mod tests { let provisional_value_without_finalized_value: StateValue<(), ()> = StateValue::new_provisional_value(Ok(Row::default()), (), ()); assert!( - provisional_value_without_finalized_value.memory_size() <= 88, + provisional_value_without_finalized_value.memory_size() <= 144, "memory size is {}", provisional_value_without_finalized_value.memory_size(), ); @@ -1512,7 +1450,7 @@ mod tests { &mut Vec::new(), ); assert!( - consolidating_value.memory_size() <= 90, + consolidating_value.memory_size() <= 146, "memory size is {}", consolidating_value.memory_size(), ); diff --git a/src/storage/src/upsert_continual_feedback.rs b/src/storage/src/upsert_continual_feedback.rs index 880665dc3311c..b6c3115f5c8be 100644 --- a/src/storage/src/upsert_continual_feedback.rs +++ b/src/storage/src/upsert_continual_feedback.rs @@ -122,7 +122,7 @@ where G::Timestamp: Refines + TotalOrder + Sync, F: FnOnce() -> Fut + 'static, Fut: std::future::Future, - US: UpsertStateBackend>, + US: UpsertStateBackend, FromTime: Debug + timely::ExchangeData + Ord + Sync, { let mut builder = AsyncOperatorBuilder::new("Upsert".to_string(), input.scope()); @@ -165,11 +165,7 @@ where drop(output_cap); let mut snapshot_cap = 
CapabilitySet::from_elem(snapshot_cap); - // The order key of the `UpsertState` is `Option`, which implements `Default` - // (as required for `consolidate_chunk`), with slightly more efficient serialization - // than a default `Partitioned`. - - let mut state = UpsertState::<_, G::Timestamp, Option>::new( + let mut state = UpsertState::<_, G::Timestamp, FromTime>::new( state_fn().await, upsert_shared_metrics, &upsert_metrics, @@ -185,7 +181,7 @@ where // A re-usable buffer of changes, per key. This is an `IndexMap` because it has to be `drain`-able // and have a consistent iteration order. - let mut commands_state: indexmap::IndexMap<_, upsert_types::UpsertValueAndSize>> = + let mut commands_state: indexmap::IndexMap<_, upsert_types::UpsertValueAndSize> = indexmap::IndexMap::new(); let mut multi_get_scratch = Vec::new(); @@ -602,16 +598,16 @@ enum DrainStyle<'a, T> { /// stale state, since in this drain style no modifications to the state are made. async fn drain_staged_input( stash: &mut Vec<(T, UpsertKey, Reverse, Option)>, - commands_state: &mut indexmap::IndexMap>>, + commands_state: &mut indexmap::IndexMap>, output_updates: &mut Vec<(Result, T, Diff)>, multi_get_scratch: &mut Vec, drain_style: DrainStyle<'_, T>, error_emitter: &mut E, - state: &mut UpsertState<'_, S, T, Option>, + state: &mut UpsertState<'_, S, T, FromTime>, source_config: &crate::source::SourceExportCreationConfig, ) -> Option where - S: UpsertStateBackend>, + S: UpsertStateBackend, G: Scope, T: TotalOrder + timely::ExchangeData + Debug + Ord + Sync, FromTime: timely::ExchangeData + Ord + Sync, @@ -754,8 +750,7 @@ where // is from snapshotting, which always sorts below new values/deletes. let existing_order = existing_state_cell .as_ref() - .and_then(|cs| cs.provisional_order(&ts).map_or(None, Option::as_ref)); - //.map(|cs| cs.provisional_order(&ts).map_or(None, Option::as_ref)); + .and_then(|cs| cs.provisional_order(&ts)); if existing_order >= Some(&from_time.0) { // Skip this update. 
If no later updates adjust this key, then we just // end up writing the same value back to state. If there @@ -780,12 +775,12 @@ where Some(existing_value) => existing_value.clone().into_provisional_value( value.clone(), ts.clone(), - Some(from_time.0.clone()), + from_time.0.clone(), ), None => StateValue::new_provisional_value( value.clone(), ts.clone(), - Some(from_time.0.clone()), + from_time.0.clone(), ), }; @@ -811,10 +806,10 @@ where let new_value = match existing_value { Some(existing_value) => existing_value - .into_provisional_tombstone(ts.clone(), Some(from_time.0.clone())), + .into_provisional_tombstone(ts.clone(), from_time.0.clone()), None => StateValue::new_provisional_tombstone( ts.clone(), - Some(from_time.0.clone()), + from_time.0.clone(), ), }; diff --git a/src/testdrive/BUILD.bazel b/src/testdrive/BUILD.bazel index faee25b2ca6b8..ebc7b4f857d3c 100644 --- a/src/testdrive/BUILD.bazel +++ b/src/testdrive/BUILD.bazel @@ -30,7 +30,7 @@ rust_library( proc_macro_deps = [] + all_crate_deps(proc_macro = True), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_testdrive_build_script", "//src/adapter:mz_adapter", @@ -78,7 +78,7 @@ rust_test( ), rustc_env = {}, rustc_flags = [], - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ "//src/adapter:mz_adapter", "//src/avro:mz_avro", @@ -169,7 +169,7 @@ rust_binary( "@//misc/bazel/platforms:xlang_lto_enabled": ["-Clinker-plugin-lto"], "//conditions:default": [], }), - version = "0.147.0-dev.0", + version = "0.147.13", deps = [ ":mz_testdrive", "//src/adapter:mz_adapter", diff --git a/src/testdrive/Cargo.toml b/src/testdrive/Cargo.toml index baf8b1645e74b..cc5c82ff9a631 100644 --- a/src/testdrive/Cargo.toml +++ b/src/testdrive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "mz-testdrive" description = "Integration test driver for Materialize." 
-version = "0.147.0-dev.0" +version = "0.147.13" edition.workspace = true rust-version.workspace = true publish = false diff --git a/src/transform/src/analysis.rs b/src/transform/src/analysis.rs index 1f67994dc42b4..0dd4de1f1a29f 100644 --- a/src/transform/src/analysis.rs +++ b/src/transform/src/analysis.rs @@ -941,9 +941,10 @@ mod column_names { use std::sync::Arc; use super::Analysis; - use mz_expr::{AggregateFunc, Id, MirRelationExpr, MirScalarExpr}; + use mz_expr::{AggregateFunc, Id, MirRelationExpr, MirScalarExpr, TableFunc}; use mz_repr::GlobalId; use mz_repr::explain::ExprHumanizer; + use mz_sql::ORDINALITY_COL_NAME; /// An abstract type denoting an inferred column name. #[derive(Debug, Clone)] @@ -1109,6 +1110,13 @@ mod column_names { let func_output_start = column_names.len(); let func_output_end = column_names.len() + func.output_arity(); column_names.extend(Self::anonymous(func_output_start..func_output_end)); + if let TableFunc::WithOrdinality { .. } = func { + // We know the name of the last column + // TODO(ggevay): generalize this to meaningful col names for all table functions + **column_names.last_mut().as_mut().expect( + "there is at least one output column, from the WITH ORDINALITY", + ) = ColumnName::Annotated(ORDINALITY_COL_NAME.into()); + } column_names } Filter { diff --git a/src/transform/src/canonicalization/flatmap_to_map.rs b/src/transform/src/canonicalization/flatmap_to_map.rs index b0aae39839e08..8ff4c38d009e8 100644 --- a/src/transform/src/canonicalization/flatmap_to_map.rs +++ b/src/transform/src/canonicalization/flatmap_to_map.rs @@ -11,8 +11,8 @@ //! use mz_expr::visit::Visit; -use mz_expr::{MirRelationExpr, TableFunc}; -use mz_repr::Diff; +use mz_expr::{MirRelationExpr, MirScalarExpr, TableFunc}; +use mz_repr::{Datum, Diff, ScalarType}; use crate::TransformCtx; @@ -45,9 +45,22 @@ impl FlatMapToMap { /// Turns `FlatMap` into `Map` if only one row is produced by flatmap. 
pub fn action(relation: &mut MirRelationExpr) { if let MirRelationExpr::FlatMap { func, exprs, input } = relation { + let (func, with_ordinality) = if let TableFunc::WithOrdinality { inner } = func { + // get to the actual function, but remember that we have a WITH ORDINALITY clause. + (&**inner, true) + } else { + (&*func, false) + }; + if let TableFunc::Wrap { width, .. } = func { if *width >= exprs.len() { *relation = input.take_dangerous().map(std::mem::take(exprs)); + if with_ordinality { + *relation = relation.take_dangerous().map_one(MirScalarExpr::literal( + Ok(Datum::Int64(1)), + ScalarType::Int64, + )); + } } } else if is_supported_unnest(func) { let func = func.clone(); @@ -63,11 +76,19 @@ impl FlatMapToMap { relation.take_safely(None); } (Some((row, Diff::ONE)), None) => { + assert_eq!(func.output_type().column_types.len(), 1); *relation = input.take_dangerous().map(vec![MirScalarExpr::Literal( Ok(row), func.output_type().column_types[0].clone(), )]); + if with_ordinality { + *relation = + relation.take_dangerous().map_one(MirScalarExpr::literal( + Ok(Datum::Int64(1)), + ScalarType::Int64, + )); + } } _ => {} } diff --git a/src/transform/src/movement/projection_pushdown.rs b/src/transform/src/movement/projection_pushdown.rs index 38214770a416d..3649eea1323ba 100644 --- a/src/transform/src/movement/projection_pushdown.rs +++ b/src/transform/src/movement/projection_pushdown.rs @@ -291,7 +291,7 @@ impl ProjectionPushdown { // The actual projection always has the newly-created columns at // the end. 
let mut actual_projection = columns_to_pushdown; - for c in 0..func.output_type().arity() { + for c in 0..func.output_arity() { actual_projection.push(inner_arity + c); } actual_projection diff --git a/src/transform/src/redundant_join.rs b/src/transform/src/redundant_join.rs index 51e7f32b45c8b..3dc23344e1fa5 100644 --- a/src/transform/src/redundant_join.rs +++ b/src/transform/src/redundant_join.rs @@ -397,13 +397,17 @@ impl RedundantJoin { Ok(result) } - MirRelationExpr::FlatMap { input, func, .. } => { + MirRelationExpr::FlatMap { + input, + func, + exprs: _, + } => { // FlatMap may drop records, and so we unset `exact`. let mut result = self.action(input, ctx)?; for prov in result.iter_mut() { prov.exact = false; prov.dereferenced_projection - .extend((0..func.output_type().column_types.len()).map(|_| None)); + .extend((0..func.output_arity()).map(|_| None)); } Ok(result) } diff --git a/test/lang/csharp/csharp-npgsql.csproj b/test/lang/csharp/csharp-npgsql.csproj new file mode 100644 index 0000000000000..fc1ca4e856789 --- /dev/null +++ b/test/lang/csharp/csharp-npgsql.csproj @@ -0,0 +1,23 @@ + + + + + net8.0 + false + + + + + + + + + diff --git a/test/lang/csharp/csharp-npgsql5.csproj b/test/lang/csharp/csharp-npgsql5.csproj index 1204f428cdb6a..086387cab8fa1 100644 --- a/test/lang/csharp/csharp-npgsql5.csproj +++ b/test/lang/csharp/csharp-npgsql5.csproj @@ -10,7 +10,7 @@ the Business Source License, use of this software will be governed - net5.0 + net8.0 false diff --git a/test/lang/csharp/csharp-npgsql6.csproj b/test/lang/csharp/csharp-npgsql6.csproj index 3e81dc1252bcd..8d405e8e6bb2f 100644 --- a/test/lang/csharp/csharp-npgsql6.csproj +++ b/test/lang/csharp/csharp-npgsql6.csproj @@ -10,12 +10,12 @@ the Business Source License, use of this software will be governed - net5.0 + net8.0 false - + diff --git a/test/lang/csharp/csharp-npgsql7.csproj b/test/lang/csharp/csharp-npgsql7.csproj new file mode 100644 index 0000000000000..66c7678ccbafd --- /dev/null +++ 
b/test/lang/csharp/csharp-npgsql7.csproj @@ -0,0 +1,23 @@ + + + + + net8.0 + false + + + + + + + + + diff --git a/test/lang/csharp/csharp-npgsql8.csproj b/test/lang/csharp/csharp-npgsql8.csproj new file mode 100644 index 0000000000000..30af173339534 --- /dev/null +++ b/test/lang/csharp/csharp-npgsql8.csproj @@ -0,0 +1,23 @@ + + + + + net8.0 + false + + + + + + + + + diff --git a/test/lang/csharp/mzcompose.py b/test/lang/csharp/mzcompose.py index a9c2417861064..c997121f56c5d 100644 --- a/test/lang/csharp/mzcompose.py +++ b/test/lang/csharp/mzcompose.py @@ -19,7 +19,7 @@ Service( name="csharp", config={ - "image": "mcr.microsoft.com/dotnet/sdk:5.0-focal", + "image": "mcr.microsoft.com/dotnet/sdk:8.0", "volumes": [ "../../../:/workdir", ], diff --git a/test/lang/csharp/test.sh b/test/lang/csharp/test.sh index b928f212e3235..0eaf9f4f76db0 100755 --- a/test/lang/csharp/test.sh +++ b/test/lang/csharp/test.sh @@ -19,5 +19,9 @@ cd "$(dirname "$0")/../../.." cd test/lang/csharp -dotnet test csharp-npgsql5.csproj +# TODO: Reenable when database-issues#9531 is fixed +# dotnet test csharp-npgsql.csproj +# dotnet test csharp-npgsql8.csproj +dotnet test csharp-npgsql7.csproj dotnet test csharp-npgsql6.csproj +dotnet test csharp-npgsql5.csproj diff --git a/test/lang/java/smoketest/SmokeTest.java b/test/lang/java/smoketest/SmokeTest.java index 28d04cd46a738..ddc0349bfd53d 100644 --- a/test/lang/java/smoketest/SmokeTest.java +++ b/test/lang/java/smoketest/SmokeTest.java @@ -179,4 +179,12 @@ void testPgJDBCgetPrimaryKeys() throws SQLException, ClassNotFoundException { stmt.execute("DROP TABLE materialize.public.getpks"); stmt.close(); } + + // We test for getCatalog due to a regression caused by (database-issues#9530) + @Test + void testPgJDBCgetCatalog() throws SQLException, ClassNotFoundException { + //Retrieving the current catalog name + String catalogName = conn.getCatalog(); + Assertions.assertEquals("materialize", catalogName); + } } diff --git 
a/test/lang/java/smoketest/pom.xml b/test/lang/java/smoketest/pom.xml index 936f2812f317d..112678a7dfb93 100644 --- a/test/lang/java/smoketest/pom.xml +++ b/test/lang/java/smoketest/pom.xml @@ -30,7 +30,7 @@ the Business Source License, use of this software will be governed org.postgresql postgresql - 42.6.1 + 42.7.7 org.junit.jupiter diff --git a/test/lang/js/package.json b/test/lang/js/package.json index 0727c0e140850..4890c085f1a36 100644 --- a/test/lang/js/package.json +++ b/test/lang/js/package.json @@ -11,10 +11,10 @@ "@babel/preset-env": "^7.9.5", "@babel/preset-typescript": "^7.9.0", "@types/jest": "^27.0.0", - "@types/pg": "^7.14.3", + "@types/pg": "^8.15.5", "babel-jest": "^27.0.0", "jest": "^27.0.0", - "pg": "^8.11.3", + "pg": "^8.16.3", "pg-query-stream": "^4.5.3", "prettier": "^2.0.4" }, diff --git a/test/lang/js/yarn.lock b/test/lang/js/yarn.lock index 581ae28f85bf3..0d4d2d4d8c49e 100644 --- a/test/lang/js/yarn.lock +++ b/test/lang/js/yarn.lock @@ -10,243 +10,248 @@ "@jridgewell/gen-mapping" "^0.3.5" "@jridgewell/trace-mapping" "^0.3.24" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.25.9", "@babel/code-frame@^7.26.2": - version "7.26.2" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" - integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.27.1.tgz#200f715e66d52a23b221a9435534a91cc13ad5be" + integrity sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg== dependencies: - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-validator-identifier" "^7.27.1" js-tokens "^4.0.0" - picocolors "^1.0.0" + picocolors "^1.1.1" -"@babel/compat-data@^7.22.6", 
"@babel/compat-data@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.5.tgz#df93ac37f4417854130e21d72c66ff3d4b897fc7" - integrity sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg== +"@babel/compat-data@^7.27.2", "@babel/compat-data@^7.27.7", "@babel/compat-data@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.28.0.tgz#9fc6fd58c2a6a15243cd13983224968392070790" + integrity sha512-60X7qkglvrap8mn1lh2ebxXdZYtUcpd7gsmy9kLaBJ4i/WdY8PqTSdxyA8qraikqKQK5C1KRBKXqznrVapyNaw== "@babel/core@^7.1.0", "@babel/core@^7.12.3", "@babel/core@^7.7.2", "@babel/core@^7.8.0", "@babel/core@^7.9.0": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.7.tgz#0439347a183b97534d52811144d763a17f9d2b24" - integrity sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA== + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.28.0.tgz#55dad808d5bf3445a108eefc88ea3fdf034749a4" + integrity sha512-UlLAnTPrFdNGoFtbSXwcGFQBtQZJCNjaN6hQNP3UPvuNXT1i82N26KL3dZeIpNalWywr9IuQuncaAfUaS1g6sQ== dependencies: "@ampproject/remapping" "^2.2.0" - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.26.5" - "@babel/helper-compilation-targets" "^7.26.5" - "@babel/helper-module-transforms" "^7.26.0" - "@babel/helpers" "^7.26.7" - "@babel/parser" "^7.26.7" - "@babel/template" "^7.25.9" - "@babel/traverse" "^7.26.7" - "@babel/types" "^7.26.7" + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.28.0" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-module-transforms" "^7.27.3" + "@babel/helpers" "^7.27.6" + "@babel/parser" "^7.28.0" + "@babel/template" "^7.27.2" + "@babel/traverse" "^7.28.0" + "@babel/types" "^7.28.0" convert-source-map "^2.0.0" debug "^4.1.0" gensync "^1.0.0-beta.2" json5 "^2.2.3" semver "^6.3.1" -"@babel/generator@^7.26.5", 
"@babel/generator@^7.7.2": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.5.tgz#e44d4ab3176bbcaf78a5725da5f1dc28802a9458" - integrity sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw== +"@babel/generator@^7.28.0", "@babel/generator@^7.7.2": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.28.0.tgz#9cc2f7bd6eb054d77dc66c2664148a0c5118acd2" + integrity sha512-lJjzvrbEeWrhB4P3QBsH7tey117PjLZnDbLiQEKjQ/fNJTjuq4HSqgFA+UNSwZT8D7dxxbnuSBMsa1lrWzKlQg== dependencies: - "@babel/parser" "^7.26.5" - "@babel/types" "^7.26.5" - "@jridgewell/gen-mapping" "^0.3.5" - "@jridgewell/trace-mapping" "^0.3.25" + "@babel/parser" "^7.28.0" + "@babel/types" "^7.28.0" + "@jridgewell/gen-mapping" "^0.3.12" + "@jridgewell/trace-mapping" "^0.3.28" jsesc "^3.0.2" -"@babel/helper-annotate-as-pure@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.9.tgz#d8eac4d2dc0d7b6e11fa6e535332e0d3184f06b4" - integrity sha512-gv7320KBUFJz1RnylIg5WWYPRXKZ884AGkYpgpWW02TH66Dl+HaC1t1CKd0z3R4b6hdYEcmrNZHUmfCP+1u3/g== +"@babel/helper-annotate-as-pure@^7.27.1", "@babel/helper-annotate-as-pure@^7.27.3": + version "7.27.3" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz#f31fd86b915fc4daf1f3ac6976c59be7084ed9c5" + integrity sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg== dependencies: - "@babel/types" "^7.25.9" + "@babel/types" "^7.27.3" -"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.25.9", "@babel/helper-compilation-targets@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz#75d92bb8d8d51301c0d49e52a65c9a7fe94514d8" - integrity 
sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA== +"@babel/helper-compilation-targets@^7.27.1", "@babel/helper-compilation-targets@^7.27.2": + version "7.27.2" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz#46a0f6efab808d51d29ce96858dd10ce8732733d" + integrity sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ== dependencies: - "@babel/compat-data" "^7.26.5" - "@babel/helper-validator-option" "^7.25.9" + "@babel/compat-data" "^7.27.2" + "@babel/helper-validator-option" "^7.27.1" browserslist "^4.24.0" lru-cache "^5.1.1" semver "^6.3.1" -"@babel/helper-create-class-features-plugin@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.9.tgz#7644147706bb90ff613297d49ed5266bde729f83" - integrity sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ== - dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-member-expression-to-functions" "^7.25.9" - "@babel/helper-optimise-call-expression" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/traverse" "^7.25.9" +"@babel/helper-create-class-features-plugin@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.27.1.tgz#5bee4262a6ea5ddc852d0806199eb17ca3de9281" + integrity sha512-QwGAmuvM17btKU5VqXfb+Giw4JcN0hjuufz3DYnpeVDvZLAObloM77bhMXiqry3Iio+Ai4phVRDwl6WU10+r5A== + dependencies: + "@babel/helper-annotate-as-pure" "^7.27.1" + "@babel/helper-member-expression-to-functions" "^7.27.1" + "@babel/helper-optimise-call-expression" "^7.27.1" + "@babel/helper-replace-supers" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" 
"^7.27.1" + "@babel/traverse" "^7.27.1" semver "^6.3.1" -"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.25.9": - version "7.26.3" - resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.26.3.tgz#5169756ecbe1d95f7866b90bb555b022595302a0" - integrity sha512-G7ZRb40uUgdKOQqPLjfD12ZmGA54PzqDFUv2BKImnC9QIfGhIHKvVML0oN8IUiDq4iRqpq74ABpvOaerfWdong== +"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.27.1.tgz#05b0882d97ba1d4d03519e4bce615d70afa18c53" + integrity sha512-uVDC72XVf8UbrH5qQTc18Agb8emwjTiZrQE11Nv3CuBEZmVvTwwE9CBUEvHku06gQCAyYf8Nv6ja1IN+6LMbxQ== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" + "@babel/helper-annotate-as-pure" "^7.27.1" regexpu-core "^6.2.0" semver "^6.3.1" -"@babel/helper-define-polyfill-provider@^0.6.2", "@babel/helper-define-polyfill-provider@^0.6.3": - version "0.6.3" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.3.tgz#f4f2792fae2ef382074bc2d713522cf24e6ddb21" - integrity sha512-HK7Bi+Hj6H+VTHA3ZvBis7V/6hu9QuTrnMXNybfUf2iiuU/N97I8VjB+KbhFF8Rld/Lx5MzoCwPCpPjfK+n8Cg== +"@babel/helper-define-polyfill-provider@^0.6.5": + version "0.6.5" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.5.tgz#742ccf1cb003c07b48859fc9fa2c1bbe40e5f753" + integrity sha512-uJnGFcPsWQK8fvjgGP5LZUZZsYGIoPeRjSF5PGwrelYgq7Q15/Ft9NGFp1zglwgIv//W0uG4BevRuSJRyylZPg== dependencies: - "@babel/helper-compilation-targets" "^7.22.6" - "@babel/helper-plugin-utils" "^7.22.5" - debug "^4.1.1" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-plugin-utils" "^7.27.1" + debug "^4.4.1" 
lodash.debounce "^4.0.8" - resolve "^1.14.2" - -"@babel/helper-member-expression-to-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.9.tgz#9dfffe46f727005a5ea29051ac835fb735e4c1a3" - integrity sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ== - dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/helper-module-imports@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715" - integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw== - dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/helper-module-transforms@^7.25.9", "@babel/helper-module-transforms@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae" - integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw== - dependencies: - "@babel/helper-module-imports" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" - "@babel/traverse" "^7.25.9" - -"@babel/helper-optimise-call-expression@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.9.tgz#3324ae50bae7e2ab3c33f60c9a877b6a0146b54e" - integrity sha512-FIpuNaz5ow8VyrYcnXQTDRGvV6tTjkNtCK/RYNDXGSLlUD6cBuQTSw43CShGxjvfBTfcUA/r6UhUCbtYqkhcuQ== - dependencies: - "@babel/types" "^7.25.9" - -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", 
"@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.25.9", "@babel/helper-plugin-utils@^7.26.5", "@babel/helper-plugin-utils@^7.8.0": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz#18580d00c9934117ad719392c4f6585c9333cc35" - integrity sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg== - -"@babel/helper-remap-async-to-generator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.9.tgz#e53956ab3d5b9fb88be04b3e2f31b523afd34b92" - integrity sha512-IZtukuUeBbhgOcaW2s06OXTzVNJR0ybm4W5xC1opWFFJMZbwRj5LCk+ByYH7WdZPZTt8KnFwA8pvjN2yqcPlgw== - dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-wrap-function" "^7.25.9" - "@babel/traverse" "^7.25.9" - -"@babel/helper-replace-supers@^7.25.9": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.26.5.tgz#6cb04e82ae291dae8e72335dfe438b0725f14c8d" - integrity sha512-bJ6iIVdYX1YooY2X7w1q6VITt+LnUILtNk7zT78ykuwStx8BauCzxvFqFaHjOpW1bVnSUM1PN1f0p5P21wHxvg== - dependencies: - "@babel/helper-member-expression-to-functions" "^7.25.9" - "@babel/helper-optimise-call-expression" "^7.25.9" - "@babel/traverse" "^7.26.5" - -"@babel/helper-skip-transparent-expression-wrappers@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.25.9.tgz#0b2e1b62d560d6b1954893fd2b705dc17c91f0c9" - integrity sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA== - dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/helper-string-parser@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c" - integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA== - -"@babel/helper-validator-identifier@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7" - integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== - -"@babel/helper-validator-option@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz#86e45bd8a49ab7e03f276577f96179653d41da72" - integrity sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw== - -"@babel/helper-wrap-function@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.25.9.tgz#d99dfd595312e6c894bd7d237470025c85eea9d0" - integrity sha512-ETzz9UTjQSTmw39GboatdymDq4XIQbR8ySgVrylRhPOFpsd+JrKHIuF0de7GCWmem+T4uC5z7EZguod7Wj4A4g== - dependencies: - "@babel/template" "^7.25.9" - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/helpers@^7.26.7": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.7.tgz#fd1d2a7c431b6e39290277aacfd8367857c576a4" - integrity sha512-8NHiL98vsi0mbPQmYAGWwfcFaOy4j2HY49fXJCfuDcdE7fMIsH9a7GdaeXpIBsbT7307WU8KCMp5pUVDNL4f9A== - dependencies: - "@babel/template" "^7.25.9" - "@babel/types" "^7.26.7" + resolve "^1.22.10" + +"@babel/helper-globals@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/helper-globals/-/helper-globals-7.28.0.tgz#b9430df2aa4e17bc28665eadeae8aa1d985e6674" + integrity sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw== + 
+"@babel/helper-member-expression-to-functions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.27.1.tgz#ea1211276be93e798ce19037da6f06fbb994fa44" + integrity sha512-E5chM8eWjTp/aNoVpcbfM7mLxu9XGLWYise2eBKGQomAk/Mb4XoxyqXTZbuTohbsl8EKqdlMhnDI2CCLfcs9wA== + dependencies: + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" + +"@babel/helper-module-imports@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz#7ef769a323e2655e126673bb6d2d6913bbead204" + integrity sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w== + dependencies: + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" + +"@babel/helper-module-transforms@^7.27.1", "@babel/helper-module-transforms@^7.27.3": + version "7.27.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.27.3.tgz#db0bbcfba5802f9ef7870705a7ef8788508ede02" + integrity sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg== + dependencies: + "@babel/helper-module-imports" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" + "@babel/traverse" "^7.27.3" + +"@babel/helper-optimise-call-expression@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz#c65221b61a643f3e62705e5dd2b5f115e35f9200" + integrity sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw== + dependencies: + "@babel/types" "^7.27.1" + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.27.1", "@babel/helper-plugin-utils@^7.8.0": + 
version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz#ddb2f876534ff8013e6c2b299bf4d39b3c51d44c" + integrity sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw== + +"@babel/helper-remap-async-to-generator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.27.1.tgz#4601d5c7ce2eb2aea58328d43725523fcd362ce6" + integrity sha512-7fiA521aVw8lSPeI4ZOD3vRFkoqkJcS+z4hFo82bFSH/2tNd6eJ5qCVMS5OzDmZh/kaHQeBaeyxK6wljcPtveA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.27.1" + "@babel/helper-wrap-function" "^7.27.1" + "@babel/traverse" "^7.27.1" + +"@babel/helper-replace-supers@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.27.1.tgz#b1ed2d634ce3bdb730e4b52de30f8cccfd692bc0" + integrity sha512-7EHz6qDZc8RYS5ElPoShMheWvEgERonFCs7IAonWLLUTXW59DP14bCZt89/GKyreYn8g3S83m21FelHKbeDCKA== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.27.1" + "@babel/helper-optimise-call-expression" "^7.27.1" + "@babel/traverse" "^7.27.1" + +"@babel/helper-skip-transparent-expression-wrappers@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz#62bb91b3abba8c7f1fec0252d9dbea11b3ee7a56" + integrity sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg== + dependencies: + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" + +"@babel/helper-string-parser@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687" + integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA== + 
+"@babel/helper-validator-identifier@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz#a7054dcc145a967dd4dc8fee845a57c1316c9df8" + integrity sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow== + +"@babel/helper-validator-option@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz#fa52f5b1e7db1ab049445b421c4471303897702f" + integrity sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg== + +"@babel/helper-wrap-function@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.27.1.tgz#b88285009c31427af318d4fe37651cd62a142409" + integrity sha512-NFJK2sHUvrjo8wAU/nQTWU890/zB2jj0qBcCbZbbf+005cAsv6tMjXz31fBign6M5ov1o0Bllu+9nbqkfsjjJQ== + dependencies: + "@babel/template" "^7.27.1" + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" + +"@babel/helpers@^7.27.6": + version "7.28.2" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.28.2.tgz#80f0918fecbfebea9af856c419763230040ee850" + integrity sha512-/V9771t+EgXz62aCcyofnQhGM8DQACbRhvzKFsXKC9QM+5MadF8ZmIm0crDMaz3+o0h0zXfJnd4EhbYbxsrcFw== + dependencies: + "@babel/template" "^7.27.2" + "@babel/types" "^7.28.2" -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.25.9", "@babel/parser@^7.26.5", "@babel/parser@^7.26.7": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.7.tgz#e114cd099e5f7d17b05368678da0fb9f69b3385c" - integrity sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w== +"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.27.2", "@babel/parser@^7.28.0": + version "7.28.0" + resolved 
"https://registry.yarnpkg.com/@babel/parser/-/parser-7.28.0.tgz#979829fbab51a29e13901e5a80713dbcb840825e" + integrity sha512-jVZGvOxOuNSsuQuLRTh13nU0AogFlw32w/MT+LV6D3sP5WdbW61E77RnkbaO2dUvmPAYrBDJXGn5gGS6tH4j8g== dependencies: - "@babel/types" "^7.26.7" - -"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.9.tgz#cc2e53ebf0a0340777fff5ed521943e253b4d8fe" - integrity sha512-ZkRyVkThtxQ/J6nv3JFYv1RYY+JT5BvU0y3k5bWrmuG4woXypRa4PXmm9RhOwodRkYFWqC0C0cqcJ4OqR7kW+g== + "@babel/types" "^7.28.0" + +"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.27.1.tgz#61dd8a8e61f7eb568268d1b5f129da3eee364bf9" + integrity sha512-QPG3C9cCVRQLxAVwmefEmwdTanECuUBMQZ/ym5kiw3XKCGA7qkuQLcjWWHcrD/GKbn/WmJwaezfuuAOcyKlRPA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/traverse" "^7.25.9" - -"@babel/plugin-bugfix-safari-class-field-initializer-scope@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.9.tgz#af9e4fb63ccb8abcb92375b2fcfe36b60c774d30" - integrity sha512-MrGRLZxLD/Zjj0gdU15dfs+HH/OXvnw/U4jJD8vpcP2CJQapPEv1IWwjc/qMg7ItBlPwSv1hRBbb7LeuANdcnw== + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.27.1" + +"@babel/plugin-bugfix-safari-class-field-initializer-scope@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.27.1.tgz#43f70a6d7efd52370eefbdf55ae03d91b293856d" + integrity 
sha512-qNeq3bCKnGgLkEXUuFry6dPlGfCdQNZbn7yUAPCInwAJHMU7THJfrBSozkcWq5sNM6RcF3S8XyQL2A52KNR9IA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - -"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.9.tgz#e8dc26fcd616e6c5bf2bd0d5a2c151d4f92a9137" - integrity sha512-2qUwwfAFpJLZqxd02YW9btUCZHl+RFvdDkNfZwaIJrvB8Tesjsk8pEQkTvGwZXLqXUx/2oyY3ySRhm6HOXuCug== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.27.1.tgz#beb623bd573b8b6f3047bd04c32506adc3e58a72" + integrity sha512-g4L7OYun04N1WyqMNjldFwlfPCLVkgB54A/YCXICZYBsvJJE3kByKv9c9+R/nAfmIfjl2rKYLNyMHboYbZaWaA== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.25.9.tgz#807a667f9158acac6f6164b4beb85ad9ebc9e1d1" - integrity sha512-6xWgLZTJXwilVjlnV7ospI3xi+sl8lN8rXXbBD6vYn3UYDlGsag8wrZkKcSI8G6KgqKP7vNFaDgeDnfAABq61g== +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.27.1.tgz#e134a5479eb2ba9c02714e8c1ebf1ec9076124fd" + integrity 
sha512-oO02gcONcD5O1iTLi/6frMJBIwWEHceWGSGqrpCmEL8nogiS6J9PBlE48CaK20/Jx1LuRml9aDftLgdjXT8+Cw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/plugin-transform-optional-chaining" "^7.25.9" - -"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.9.tgz#de7093f1e7deaf68eadd7cc6b07f2ab82543269e" - integrity sha512-aLnMXYPnzwwqhYSCyXfKkIkYgJ8zv9RK+roo9DkTXz38ynIhd9XCbN08s3MGvqL2MYGVUGdRQLL/JqBIeJhJBg== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" + "@babel/plugin-transform-optional-chaining" "^7.27.1" + +"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.27.1.tgz#bb1c25af34d75115ce229a1de7fa44bf8f955670" + integrity sha512-6BpaYGDavZqkI6yT+KSPdpZFfpnd68UKXbcjI9pJ13pvHhPrCKWOOLp+ysvMeA+DxnhuPpgIaRpxRxo5A9t5jw== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.27.1" "@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2": version "7.21.0-placeholder-for-preset-env.2" @@ -281,19 +286,19 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-import-assertions@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.26.0.tgz#620412405058efa56e4a564903b79355020f445f" - integrity sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg== 
+"@babel/plugin-syntax-import-assertions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.27.1.tgz#88894aefd2b03b5ee6ad1562a7c8e1587496aecd" + integrity sha512-UT/Jrhw57xg4ILHLFnzFpPDlMbcdEicaAtjPQpbj9wa8T4r5KVWCimHcL/460g8Ht0DMxDyjsLgiWSkVjnwPFg== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-syntax-import-attributes@^7.24.7", "@babel/plugin-syntax-import-attributes@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz#3b1412847699eea739b4f2602c74ce36f6b0b0f7" - integrity sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A== +"@babel/plugin-syntax-import-attributes@^7.24.7", "@babel/plugin-syntax-import-attributes@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz#34c017d54496f9b11b61474e7ea3dfd5563ffe07" + integrity sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-syntax-import-meta@^7.10.4": version "7.10.4" @@ -309,12 +314,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-jsx@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz#a34313a178ea56f1951599b929c1ceacee719290" - integrity sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA== +"@babel/plugin-syntax-jsx@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz#2f9beb5eff30fa507c5532d107daac7b888fa34c" + integrity 
sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-syntax-logical-assignment-operators@^7.10.4": version "7.10.4" @@ -372,12 +377,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-typescript@^7.25.9", "@babel/plugin-syntax-typescript@^7.7.2": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz#67dda2b74da43727cf21d46cf9afef23f4365399" - integrity sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ== +"@babel/plugin-syntax-typescript@^7.27.1", "@babel/plugin-syntax-typescript@^7.7.2": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz#5147d29066a793450f220c63fa3a9431b7e6dd18" + integrity sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-syntax-unicode-sets-regex@^7.18.6": version "7.18.6" @@ -387,477 +392,488 @@ "@babel/helper-create-regexp-features-plugin" "^7.18.6" "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-arrow-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.25.9.tgz#7821d4410bee5daaadbb4cdd9a6649704e176845" - integrity sha512-6jmooXYIwn9ca5/RylZADJ+EnSxVUS5sjeJ9UPk6RWRzXCmOJCy6dqItPJFpw2cuCangPK4OYr5uhGKcmrm5Qg== +"@babel/plugin-transform-arrow-functions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.27.1.tgz#6e2061067ba3ab0266d834a9f94811196f2aba9a" + integrity 
sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-async-generator-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.9.tgz#1b18530b077d18a407c494eb3d1d72da505283a2" - integrity sha512-RXV6QAzTBbhDMO9fWwOmwwTuYaiPbggWQ9INdZqAYeSHyG7FzQ+nOZaUUjNwKv9pV3aE4WFqFm1Hnbci5tBCAw== +"@babel/plugin-transform-async-generator-functions@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.28.0.tgz#1276e6c7285ab2cd1eccb0bc7356b7a69ff842c2" + integrity sha512-BEOdvX4+M765icNPZeidyADIvQ1m1gmunXufXxvRESy/jNNyfovIqUyE7MVgGBjWktCoJlzvFA1To2O4ymIO3Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-remap-async-to-generator" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-remap-async-to-generator" "^7.27.1" + "@babel/traverse" "^7.28.0" -"@babel/plugin-transform-async-to-generator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.25.9.tgz#c80008dacae51482793e5a9c08b39a5be7e12d71" - integrity sha512-NT7Ejn7Z/LjUH0Gv5KsBCxh7BH3fbLTV0ptHvpeMvrt3cPThHfJfst9Wrb7S8EvJ7vRTFI7z+VAvFVEQn/m5zQ== +"@babel/plugin-transform-async-to-generator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.27.1.tgz#9a93893b9379b39466c74474f55af03de78c66e7" + integrity sha512-NREkZsZVJS4xmTr8qzE5y8AfIPqsdQfRuUiLRTEzb7Qii8iFWCyDKaUV2c0rCuh4ljDZ98ALHP/PetiBV2nddA== dependencies: - "@babel/helper-module-imports" "^7.25.9" - 
"@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-remap-async-to-generator" "^7.25.9" + "@babel/helper-module-imports" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-remap-async-to-generator" "^7.27.1" -"@babel/plugin-transform-block-scoped-functions@^7.26.5": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.26.5.tgz#3dc4405d31ad1cbe45293aa57205a6e3b009d53e" - integrity sha512-chuTSY+hq09+/f5lMj8ZSYgCFpppV2CbYrhNFJ1BFoXpiWPnnAb7R0MqrafCpN8E1+YRrtM1MXZHJdIx8B6rMQ== +"@babel/plugin-transform-block-scoped-functions@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.27.1.tgz#558a9d6e24cf72802dd3b62a4b51e0d62c0f57f9" + integrity sha512-cnqkuOtZLapWYZUYM5rVIdv1nXYuFVIltZ6ZJ7nIj585QsjKM5dhL2Fu/lICXZ1OyIAFc7Qy+bvDAtTXqGrlhg== dependencies: - "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-block-scoping@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.9.tgz#c33665e46b06759c93687ca0f84395b80c0473a1" - integrity sha512-1F05O7AYjymAtqbsFETboN1NvBdcnzMerO+zlMyJBEz6WkMdejvGWw9p05iTSjC85RLlBseHHQpYaM4gzJkBGg== +"@babel/plugin-transform-block-scoping@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.28.0.tgz#e7c50cbacc18034f210b93defa89638666099451" + integrity sha512-gKKnwjpdx5sER/wl0WN0efUBFzF/56YZO0RJrSYP4CljXnP31ByY7fol89AzomdlLNzI36AvOTmYHsnZTCkq8Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-class-properties@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.9.tgz#a8ce84fedb9ad512549984101fa84080a9f5f51f" - integrity sha512-bbMAII8GRSkcd0h0b4X+36GksxuheLFjP65ul9w6C3KgAamI3JqErNgSrosX6ZPj+Mpim5VvEbawXxJCyEUV3Q== +"@babel/plugin-transform-class-properties@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.27.1.tgz#dd40a6a370dfd49d32362ae206ddaf2bb082a925" + integrity sha512-D0VcalChDMtuRvJIu3U/fwWjf8ZMykz5iZsg77Nuj821vCKI3zCyRLwRdWbsuJ/uRwZhZ002QtCqIkwC/ZkvbA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-class-static-block@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.26.0.tgz#6c8da219f4eb15cae9834ec4348ff8e9e09664a0" - integrity sha512-6J2APTs7BDDm+UMqP1useWqhcRAXo0WIoVj26N7kPFB6S73Lgvyka4KTZYIxtgYXiN5HTyRObA72N2iu628iTQ== +"@babel/plugin-transform-class-static-block@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.27.1.tgz#7e920d5625b25bbccd3061aefbcc05805ed56ce4" + integrity sha512-s734HmYU78MVzZ++joYM+NkJusItbdRcbm+AGRgJCt3iA+yux0QpD9cBVdz3tKyrjVYWRl7j0mHSmv4lhV0aoA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-classes@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.9.tgz#7152457f7880b593a63ade8a861e6e26a4469f52" - integrity 
sha512-mD8APIXmseE7oZvZgGABDyM34GUmK45Um2TXiBUt7PnuAxrgoSVf123qUzPxEr/+/BHrRn5NMZCdE2m/1F8DGg== +"@babel/plugin-transform-classes@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.28.0.tgz#12fa46cffc32a6e084011b650539e880add8a0f8" + integrity sha512-IjM1IoJNw72AZFlj33Cu8X0q2XK/6AaVC3jQu+cgQ5lThWD5ajnuUAml80dqRmOhmPkTH8uAwnpMu9Rvj0LTRA== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" - "@babel/traverse" "^7.25.9" - globals "^11.1.0" + "@babel/helper-annotate-as-pure" "^7.27.3" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-globals" "^7.28.0" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-replace-supers" "^7.27.1" + "@babel/traverse" "^7.28.0" -"@babel/plugin-transform-computed-properties@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.9.tgz#db36492c78460e534b8852b1d5befe3c923ef10b" - integrity sha512-HnBegGqXZR12xbcTHlJ9HGxw1OniltT26J5YpfruGqtUHlz/xKf/G2ak9e+t0rVqrjXa9WOhvYPz1ERfMj23AA== +"@babel/plugin-transform-computed-properties@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.27.1.tgz#81662e78bf5e734a97982c2b7f0a793288ef3caa" + integrity sha512-lj9PGWvMTVksbWiDT2tW68zGS/cyo4AkZ/QTp0sQT0mjPopCmrSkzxeXkznjqBxzDI6TclZhOJbBmbBLjuOZUw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/template" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/template" "^7.27.1" -"@babel/plugin-transform-destructuring@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.25.9.tgz#966ea2595c498224340883602d3cfd7a0c79cea1" - integrity sha512-WkCGb/3ZxXepmMiX101nnGiU+1CAdut8oHyEOHxkKuS1qKpU2SMXE2uSvfz8PBuLd49V6LEsbtyPhWC7fnkgvQ== +"@babel/plugin-transform-destructuring@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.28.0.tgz#0f156588f69c596089b7d5b06f5af83d9aa7f97a" + integrity sha512-v1nrSMBiKcodhsyJ4Gf+Z0U/yawmJDBOTpEB3mcQY52r9RIyPneGyAS/yM6seP/8I+mWI3elOMtT5dB8GJVs+A== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.28.0" -"@babel/plugin-transform-dotall-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.25.9.tgz#bad7945dd07734ca52fe3ad4e872b40ed09bb09a" - integrity sha512-t7ZQ7g5trIgSRYhI9pIJtRl64KHotutUJsh4Eze5l7olJv+mRSg4/MmbZ0tv1eeqRbdvo/+trvJD/Oc5DmW2cA== +"@babel/plugin-transform-dotall-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.27.1.tgz#aa6821de864c528b1fecf286f0a174e38e826f4d" + integrity sha512-gEbkDVGRvjj7+T1ivxrfgygpT7GUd4vmODtYpbs0gZATdkX8/iSnOtZSxiZnsgm1YjTgjI6VKBGSJJevkrclzw== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-duplicate-keys@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.25.9.tgz#8850ddf57dce2aebb4394bb434a7598031059e6d" - integrity sha512-LZxhJ6dvBb/f3x8xwWIuyiAHy56nrRG3PeYTpBkkzkYRRQ6tJLu68lEF5VIqMUZiAV7a8+Tb78nEoMCMcqjXBw== 
+"@babel/plugin-transform-duplicate-keys@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.27.1.tgz#f1fbf628ece18e12e7b32b175940e68358f546d1" + integrity sha512-MTyJk98sHvSs+cvZ4nOauwTTG1JeonDjSGvGGUNHreGQns+Mpt6WX/dVzWBHgg+dYZhkC4X+zTDfkTU+Vy9y7Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-duplicate-named-capturing-groups-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.9.tgz#6f7259b4de127721a08f1e5165b852fcaa696d31" - integrity sha512-0UfuJS0EsXbRvKnwcLjFtJy/Sxc5J5jhLHnFhy7u4zih97Hz6tJkLU+O+FMMrNZrosUPxDi6sYxJ/EA8jDiAog== +"@babel/plugin-transform-duplicate-named-capturing-groups-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.27.1.tgz#5043854ca620a94149372e69030ff8cb6a9eb0ec" + integrity sha512-hkGcueTEzuhB30B3eJCbCYeCaaEQOmQR0AdvzpD4LoN0GXMWzzGSuRrxR2xTnCrvNbVwK9N6/jQ92GSLfiZWoQ== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-dynamic-import@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.25.9.tgz#23e917de63ed23c6600c5dd06d94669dce79f7b8" - integrity sha512-GCggjexbmSLaFhqsojeugBpeaRIgWNTcgKVq/0qIteFEqY2A+b9QidYadrWlnbWQUrW5fn+mCvf3tr7OeBFTyg== +"@babel/plugin-transform-dynamic-import@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.27.1.tgz#4c78f35552ac0e06aa1f6e3c573d67695e8af5a4" + integrity sha512-MHzkWQcEmjzzVW9j2q8LGjwGWpG2mjwaaB0BNQwst3FIjqsg8Ct/mIZlvSPJvfi9y2AC8mi/ktxbFVL9pZ1I4A== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-exponentiation-operator@^7.26.3": - version "7.26.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.26.3.tgz#e29f01b6de302c7c2c794277a48f04a9ca7f03bc" - integrity sha512-7CAHcQ58z2chuXPWblnn1K6rLDnDWieghSOEmqQsrBenH0P9InCUtOJYD89pvngljmZlJcz3fcmgYsXFNGa1ZQ== +"@babel/plugin-transform-explicit-resource-management@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-explicit-resource-management/-/plugin-transform-explicit-resource-management-7.28.0.tgz#45be6211b778dbf4b9d54c4e8a2b42fa72e09a1a" + integrity sha512-K8nhUcn3f6iB+P3gwCv/no7OdzOZQcKchW6N389V6PD8NUWKZHzndOd9sPDVbMoBsbmjMqlB4L9fm+fEFNVlwQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/plugin-transform-destructuring" "^7.28.0" -"@babel/plugin-transform-export-namespace-from@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.25.9.tgz#90745fe55053394f554e40584cda81f2c8a402a2" - integrity sha512-2NsEz+CxzJIVOPx2o9UsW1rXLqtChtLoVnwYHHiB04wS5sgn7mrV45fWMBX0Kk+ub9uXytVYfNP2HjbVbCB3Ww== +"@babel/plugin-transform-exponentiation-operator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.27.1.tgz#fc497b12d8277e559747f5a3ed868dd8064f83e1" + integrity 
sha512-uspvXnhHvGKf2r4VVtBpeFnuDWsJLQ6MF6lGJLC89jBR1uoVeqM416AZtTuhTezOfgHicpJQmoD5YUakO/YmXQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-for-of@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.25.9.tgz#4bdc7d42a213397905d89f02350c5267866d5755" - integrity sha512-LqHxduHoaGELJl2uhImHwRQudhCM50pT46rIBNvtT/Oql3nqiS3wOwP+5ten7NpYSXrrVLgtZU3DZmPtWZo16A== +"@babel/plugin-transform-export-namespace-from@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.27.1.tgz#71ca69d3471edd6daa711cf4dfc3400415df9c23" + integrity sha512-tQvHWSZ3/jH2xuq/vZDy0jNn+ZdXJeM8gHvX4lnJmsc3+50yPlWdZXIc5ay+umX+2/tJIqHqiEqcJvxlmIvRvQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-function-name@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.9.tgz#939d956e68a606661005bfd550c4fc2ef95f7b97" - integrity sha512-8lP+Yxjv14Vc5MuWBpJsoUCd3hD6V9DgBon2FVYL4jJgbnVQ9fTgYmonchzZJOVNgzEgbxp4OwAf6xz6M/14XA== +"@babel/plugin-transform-for-of@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.27.1.tgz#bc24f7080e9ff721b63a70ac7b2564ca15b6c40a" + integrity sha512-BfbWFFEJFQzLCQ5N8VocnCtA8J1CLkNTe2Ms2wocj75dd6VpiqS5Z5quTYcUoo4Yq+DN0rtikODccuv7RU81sw== dependencies: - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" -"@babel/plugin-transform-json-strings@^7.25.9": - 
version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.25.9.tgz#c86db407cb827cded902a90c707d2781aaa89660" - integrity sha512-xoTMk0WXceiiIvsaquQQUaLLXSW1KJ159KP87VilruQm0LNNGxWzahxSS6T6i4Zg3ezp4vA4zuwiNUR53qmQAw== +"@babel/plugin-transform-function-name@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.27.1.tgz#4d0bf307720e4dce6d7c30fcb1fd6ca77bdeb3a7" + integrity sha512-1bQeydJF9Nr1eBCMMbC+hdwmRlsv5XYOMu03YSWFwNs0HsAmtSxxF1fyuYPqemVldVyFmlCU7w8UE14LupUSZQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-compilation-targets" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/traverse" "^7.27.1" -"@babel/plugin-transform-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.9.tgz#1a1c6b4d4aa59bc4cad5b6b3a223a0abd685c9de" - integrity sha512-9N7+2lFziW8W9pBl2TzaNht3+pgMIRP74zizeCSrtnSKVdUl8mAjjOP2OOVQAfZ881P2cNjDj1uAMEdeD50nuQ== +"@babel/plugin-transform-json-strings@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.27.1.tgz#a2e0ce6ef256376bd527f290da023983527a4f4c" + integrity sha512-6WVLVJiTjqcQauBhn1LkICsR2H+zm62I3h9faTDKt1qP4jn2o72tSvqMwtGFKGTpojce0gJs+76eZ2uCHRZh0Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-logical-assignment-operators@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.25.9.tgz#b19441a8c39a2fda0902900b306ea05ae1055db7" - integrity sha512-wI4wRAzGko551Y8eVf6iOY9EouIDTtPb0ByZx+ktDGHwv6bHFimrgJM/2T021txPZ2s4c7bqvHbd+vXG6K948Q== +"@babel/plugin-transform-literals@^7.27.1": + 
version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.27.1.tgz#baaefa4d10a1d4206f9dcdda50d7d5827bb70b24" + integrity sha512-0HCFSepIpLTkLcsi86GG3mTUzxV5jpmbv97hTETW3yzrAij8aqlD36toB1D0daVFJM8NK6GvKO0gslVQmm+zZA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-member-expression-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.25.9.tgz#63dff19763ea64a31f5e6c20957e6a25e41ed5de" - integrity sha512-PYazBVfofCQkkMzh2P6IdIUaCEWni3iYEerAsRWuVd8+jlM1S9S9cz1dF9hIzyoZ8IA3+OwVYIp9v9e+GbgZhA== +"@babel/plugin-transform-logical-assignment-operators@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.27.1.tgz#890cb20e0270e0e5bebe3f025b434841c32d5baa" + integrity sha512-SJvDs5dXxiae4FbSL1aBJlG4wvl594N6YEVVn9e3JGulwioy6z3oPjx/sQBO3Y4NwUu5HNix6KJ3wBZoewcdbw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-amd@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.25.9.tgz#49ba478f2295101544abd794486cd3088dddb6c5" - integrity sha512-g5T11tnI36jVClQlMlt4qKDLlWnG5pP9CSM4GhdRciTNMRgkfpo5cR6b4rGIOYPgRRuFAvwjPQ/Yk+ql4dyhbw== +"@babel/plugin-transform-member-expression-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.27.1.tgz#37b88ba594d852418e99536f5612f795f23aeaf9" + integrity sha512-hqoBX4dcZ1I33jCSWcXrP+1Ku7kdqXf1oeah7ooKOIiAdKQ+uqftgCFNOSzA5AMS2XIHEYeGFg4cKRCdpxzVOQ== dependencies: - 
"@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-commonjs@^7.25.9", "@babel/plugin-transform-modules-commonjs@^7.26.3": - version "7.26.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.26.3.tgz#8f011d44b20d02c3de44d8850d971d8497f981fb" - integrity sha512-MgR55l4q9KddUDITEzEFYn5ZsGDXMSsU9E+kh7fjRXTIC3RHqfCo8RPRbyReYJh44HQ/yomFkqbOFohXvDCiIQ== +"@babel/plugin-transform-modules-amd@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.27.1.tgz#a4145f9d87c2291fe2d05f994b65dba4e3e7196f" + integrity sha512-iCsytMg/N9/oFq6n+gFTvUYDZQOMK5kEdeYxmxt91fcJGycfxVP9CnrxoliM0oumFERba2i8ZtwRUCMhvP1LnA== dependencies: - "@babel/helper-module-transforms" "^7.26.0" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-systemjs@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.9.tgz#8bd1b43836269e3d33307151a114bcf3ba6793f8" - integrity sha512-hyss7iIlH/zLHaehT+xwiymtPOpsiwIIRlCAOwBB04ta5Tt+lNItADdlXw3jAWZ96VJ2jlhl/c+PNIQPKNfvcA== +"@babel/plugin-transform-modules-commonjs@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.27.1.tgz#8e44ed37c2787ecc23bdc367f49977476614e832" + integrity sha512-OJguuwlTYlN0gBZFRPqwOGNWssZjfIUdS7HMYtN8c1KmwpwHFBwTeFZrg9XZa+DFTitWOW5iTAG7tyCUPsCCyw== dependencies: - "@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + 
"@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-modules-umd@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.25.9.tgz#6710079cdd7c694db36529a1e8411e49fcbf14c9" - integrity sha512-bS9MVObUgE7ww36HEfwe6g9WakQ0KF07mQF74uuXdkoziUPfKyu/nIm663kz//e5O1nPInPFx36z7WJmJ4yNEw== +"@babel/plugin-transform-modules-systemjs@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.27.1.tgz#00e05b61863070d0f3292a00126c16c0e024c4ed" + integrity sha512-w5N1XzsRbc0PQStASMksmUeqECuzKuTJer7kFagK8AXgpCMkeDMO5S+aaFb7A51ZYDF7XI34qsTX+fkHiIm5yA== dependencies: - "@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" + "@babel/traverse" "^7.27.1" -"@babel/plugin-transform-named-capturing-groups-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.25.9.tgz#454990ae6cc22fd2a0fa60b3a2c6f63a38064e6a" - integrity sha512-oqB6WHdKTGl3q/ItQhpLSnWWOpjUJLsOCLVyeFgeTktkBSCiurvPOsyt93gibI9CmuKvTUEtWmG5VhZD+5T/KA== +"@babel/plugin-transform-modules-umd@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.27.1.tgz#63f2cf4f6dc15debc12f694e44714863d34cd334" + integrity sha512-iQBE/xC5BV1OxJbp6WG7jq9IWiD+xxlZhLrdwpPkTX3ydmXdvoCpyfJN7acaIBZaOqTfr76pgzqBJflNbeRK+w== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-new-target@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.25.9.tgz#42e61711294b105c248336dcb04b77054ea8becd" - integrity sha512-U/3p8X1yCSoKyUj2eOBIx3FOn6pElFOKvAAGf8HTtItuPyB+ZeOqfn+mvTtg9ZlOAjsPdK3ayQEjqHjU/yLeVQ== +"@babel/plugin-transform-named-capturing-groups-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.27.1.tgz#f32b8f7818d8fc0cc46ee20a8ef75f071af976e1" + integrity sha512-SstR5JYy8ddZvD6MhV0tM/j16Qds4mIpJTOd1Yu9J9pJjH93bxHECF7pgtc28XvkzTD6Pxcm/0Z73Hvk7kb3Ng== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-nullish-coalescing-operator@^7.26.6": - version "7.26.6" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.26.6.tgz#fbf6b3c92cb509e7b319ee46e3da89c5bedd31fe" - integrity sha512-CKW8Vu+uUZneQCPtXmSBUC6NCAUdya26hWCElAWh5mVSlSRsmiCPUUDKb3Z0szng1hiAJa098Hkhg9o4SE35Qw== +"@babel/plugin-transform-new-target@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.27.1.tgz#259c43939728cad1706ac17351b7e6a7bea1abeb" + integrity sha512-f6PiYeqXQ05lYq3TIfIDu/MtliKUbNwkGApPUvyo6+tc7uaR4cPjPe7DFPr15Uyycg2lZU6btZ575CuQoYh7MQ== dependencies: - "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-numeric-separator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.25.9.tgz#bfed75866261a8b643468b0ccfd275f2033214a1" - integrity sha512-TlprrJ1GBZ3r6s96Yq8gEQv82s8/5HnCVHtEJScUj90thHQbwe+E5MLhi2bbNHBEJuzrvltXSru+BUxHDoog7Q== 
+"@babel/plugin-transform-nullish-coalescing-operator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.27.1.tgz#4f9d3153bf6782d73dd42785a9d22d03197bc91d" + integrity sha512-aGZh6xMo6q9vq1JGcw58lZ1Z0+i0xB2x0XaauNIUXd6O1xXc3RwoWEBlsTQrY4KQ9Jf0s5rgD6SiNkaUdJegTA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-object-rest-spread@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.25.9.tgz#0203725025074164808bcf1a2cfa90c652c99f18" - integrity sha512-fSaXafEE9CVHPweLYw4J0emp1t8zYTXyzN3UuG+lylqkvYd7RMrsOQ8TYx5RF231be0vqtFC6jnx3UmpJmKBYg== +"@babel/plugin-transform-numeric-separator@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.27.1.tgz#614e0b15cc800e5997dadd9bd6ea524ed6c819c6" + integrity sha512-fdPKAcujuvEChxDBJ5c+0BTaS6revLV7CJL08e4m3de8qJfNIuCc2nc7XJYOjBoTMJeqSmwXJ0ypE14RCjLwaw== dependencies: - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-parameters" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-object-super@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.25.9.tgz#385d5de135162933beb4a3d227a2b7e52bb4cf03" - integrity sha512-Kj/Gh+Rw2RNLbCK1VAWj2U48yxxqL2x0k10nPtSdRa0O2xnHXalD0s+o1A6a0W43gJ00ANo38jxkQreckOzv5A== +"@babel/plugin-transform-object-rest-spread@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.28.0.tgz#d23021857ffd7cd809f54d624299b8086402ed8d" + integrity 
sha512-9VNGikXxzu5eCiQjdE4IZn8sb9q7Xsk5EXLDBKUYg1e/Tve8/05+KJEtcxGxAgCY5t/BpKQM+JEL/yT4tvgiUA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/plugin-transform-destructuring" "^7.28.0" + "@babel/plugin-transform-parameters" "^7.27.7" + "@babel/traverse" "^7.28.0" -"@babel/plugin-transform-optional-catch-binding@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.25.9.tgz#10e70d96d52bb1f10c5caaac59ac545ea2ba7ff3" - integrity sha512-qM/6m6hQZzDcZF3onzIhZeDHDO43bkNNlOX0i8n3lR6zLbu0GN2d8qfM/IERJZYauhAHSLHy39NF0Ctdvcid7g== +"@babel/plugin-transform-object-super@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.27.1.tgz#1c932cd27bf3874c43a5cac4f43ebf970c9871b5" + integrity sha512-SFy8S9plRPbIcxlJ8A6mT/CxFdJx/c04JEctz4jf8YZaVS2px34j7NXRrlGlHkN/M2gnpL37ZpGRGVFLd3l8Ng== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-replace-supers" "^7.27.1" -"@babel/plugin-transform-optional-chaining@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.25.9.tgz#e142eb899d26ef715435f201ab6e139541eee7dd" - integrity sha512-6AvV0FsLULbpnXeBjrY4dmWF8F7gf8QnvTEoO/wX/5xm/xE1Xo8oPuD3MPS+KS9f9XBEAWN7X1aWr4z9HdOr7A== +"@babel/plugin-transform-optional-catch-binding@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.27.1.tgz#84c7341ebde35ccd36b137e9e45866825072a30c" + integrity sha512-txEAEKzYrHEX4xSZN4kJ+OfKXFVSWKB2ZxM9dpcE3wT7smwkNmXo5ORRlVzMVdJbD+Q8ILTgSD7959uj+3Dm3Q== dependencies: - 
"@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-parameters@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.25.9.tgz#b856842205b3e77e18b7a7a1b94958069c7ba257" - integrity sha512-wzz6MKwpnshBAiRmn4jR8LYz/g8Ksg0o80XmwZDlordjwEk9SxBzTWC7F5ef1jhbrbOW2DJ5J6ayRukrJmnr0g== +"@babel/plugin-transform-optional-chaining@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.27.1.tgz#874ce3c4f06b7780592e946026eb76a32830454f" + integrity sha512-BQmKPPIuc8EkZgNKsv0X4bPmOoayeu4F1YCwx2/CfmDSXDbp7GnzlUH+/ul5VGfRg1AoFPsrIThlEBj2xb4CAg== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" -"@babel/plugin-transform-private-methods@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.9.tgz#847f4139263577526455d7d3223cd8bda51e3b57" - integrity sha512-D/JUozNpQLAPUVusvqMxyvjzllRaF8/nSrP1s2YGQT/W4LHK4xxsMcHjhOGTS01mp9Hda8nswb+FblLdJornQw== +"@babel/plugin-transform-parameters@^7.27.7": + version "7.27.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.27.7.tgz#1fd2febb7c74e7d21cf3b05f7aebc907940af53a" + integrity sha512-qBkYTYCb76RRxUM6CcZA5KRu8K4SM8ajzVeUgVdMVO9NN9uI/GaVmBg/WKJJGnNokV9SY8FxNOVWGXzqzUidBg== dependencies: - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-private-property-in-object@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.25.9.tgz#9c8b73e64e6cc3cbb2743633885a7dd2c385fe33" - integrity sha512-Evf3kcMqzXA3xfYJmZ9Pg1OvKdtqsDMSWBDzZOPLvHiTt36E75jLDQo5w1gtRU95Q4E5PDttrTf25Fw8d/uWLw== +"@babel/plugin-transform-private-methods@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.27.1.tgz#fdacbab1c5ed81ec70dfdbb8b213d65da148b6af" + integrity sha512-10FVt+X55AjRAYI9BrdISN9/AQWHqldOeZDUoLyif1Kn05a56xVBXb8ZouL8pZ9jem8QpXaOt8TS7RHUIS+GPA== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-property-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.25.9.tgz#d72d588bd88b0dec8b62e36f6fda91cedfe28e3f" - integrity sha512-IvIUeV5KrS/VPavfSM/Iu+RE6llrHrYIKY1yfCzyO/lMXHQ+p7uGhonmGVisv6tSBSVgWzMBohTcvkC9vQcQFA== +"@babel/plugin-transform-private-property-in-object@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.27.1.tgz#4dbbef283b5b2f01a21e81e299f76e35f900fb11" + integrity sha512-5J+IhqTi1XPa0DXF83jYOaARrX+41gOewWbkPyjMNRDqgOCqdffGh8L3f/Ek5utaEBZExjSAzcyjmV9SSAWObQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-annotate-as-pure" "^7.27.1" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-regenerator@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.25.9.tgz#03a8a4670d6cebae95305ac6defac81ece77740b" - integrity sha512-vwDcDNsgMPDGP0nMqzahDWE5/MLcX8sv96+wfX7as7LoF/kr97Bo/7fI00lXY4wUXYfVmwIIyG80fGZ1uvt2qg== +"@babel/plugin-transform-property-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.27.1.tgz#07eafd618800591e88073a0af1b940d9a42c6424" + integrity sha512-oThy3BCuCha8kDZ8ZkgOg2exvPYUlprMukKQXI1r1pJ47NCvxfkEy8vK+r/hT9nF0Aa4H1WUPZZjHTFtAhGfmQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - regenerator-transform "^0.15.2" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-regexp-modifiers@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.26.0.tgz#2f5837a5b5cd3842a919d8147e9903cc7455b850" - integrity sha512-vN6saax7lrA2yA/Pak3sCxuD6F5InBjn9IcrIKQPjpsLvuHYLVroTxjdlVRHjjBWxKOqIwpTXDkOssYT4BFdRw== +"@babel/plugin-transform-regenerator@^7.28.0": + version "7.28.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.28.1.tgz#bde80603442ff4bb4e910bc8b35485295d556ab1" + integrity sha512-P0QiV/taaa3kXpLY+sXla5zec4E+4t4Aqc9ggHlfZ7a2cp8/x/Gv08jfwEtn9gnnYIMvHx6aoOZ8XJL8eU71Dg== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-reserved-words@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.25.9.tgz#0398aed2f1f10ba3f78a93db219b27ef417fb9ce" - integrity sha512-7DL7DKYjn5Su++4RXu8puKZm2XBPHyjWLUidaPEkCUBbE7IPcsrkRHggAOOKydH1dASWdcUBxrkOGNxUv5P3Jg== +"@babel/plugin-transform-regexp-modifiers@^7.27.1": + version "7.27.1" 
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.27.1.tgz#df9ba5577c974e3f1449888b70b76169998a6d09" + integrity sha512-TtEciroaiODtXvLZv4rmfMhkCv8jx3wgKpL68PuiPh2M4fvz5jhsA7697N1gMvkvr/JTF13DrFYyEbY9U7cVPA== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-shorthand-properties@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.25.9.tgz#bb785e6091f99f826a95f9894fc16fde61c163f2" - integrity sha512-MUv6t0FhO5qHnS/W8XCbHmiRWOphNufpE1IVxhK5kuN3Td9FT1x4rx4K42s3RYdMXCXpfWkGSbCSd0Z64xA7Ng== +"@babel/plugin-transform-reserved-words@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.27.1.tgz#40fba4878ccbd1c56605a4479a3a891ac0274bb4" + integrity sha512-V2ABPHIJX4kC7HegLkYoDpfg9PVmuWy/i6vUM5eGK22bx4YVFD3M5F0QQnWQoDs6AGsUWTVOopBiMFQgHaSkVw== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-spread@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.25.9.tgz#24a35153931b4ba3d13cec4a7748c21ab5514ef9" - integrity sha512-oNknIB0TbURU5pqJFVbOOFspVlrpVwo2H1+HUIsVDvp5VauGGDP1ZEvO8Nn5xyMEs3dakajOxlmkNW7kNgSm6A== +"@babel/plugin-transform-shorthand-properties@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.27.1.tgz#532abdacdec87bfee1e0ef8e2fcdee543fe32b90" + integrity sha512-N/wH1vcn4oYawbJ13Y/FxcQrWk63jhfNa7jef0ih7PHSIHX2LB7GWE1rkPrOnka9kwMxb6hMl19p7lidA+EHmQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - 
"@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-sticky-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.25.9.tgz#c7f02b944e986a417817b20ba2c504dfc1453d32" - integrity sha512-WqBUSgeVwucYDP9U/xNRQam7xV8W5Zf+6Eo7T2SRVUFlhRiMNFdFz58u0KZmCVVqs2i7SHgpRnAhzRNmKfi2uA== +"@babel/plugin-transform-spread@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.27.1.tgz#1a264d5fc12750918f50e3fe3e24e437178abb08" + integrity sha512-kpb3HUqaILBJcRFVhFUs6Trdd4mkrzcGXss+6/mxUd273PfbWqSDHRzMT2234gIg2QYfAjvXLSquP1xECSg09Q== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" -"@babel/plugin-transform-template-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.25.9.tgz#6dbd4a24e8fad024df76d1fac6a03cf413f60fe1" - integrity sha512-o97AE4syN71M/lxrCtQByzphAdlYluKPDBzDVzMmfCobUjjhAryZV0AIpRPrxN0eAkxXO6ZLEScmt+PNhj2OTw== +"@babel/plugin-transform-sticky-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.27.1.tgz#18984935d9d2296843a491d78a014939f7dcd280" + integrity sha512-lhInBO5bi/Kowe2/aLdBAawijx+q1pQzicSgnkB6dUPc1+RC8QmJHKf2OjvU+NZWitguJHEaEmbV6VWEouT58g== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-typeof-symbol@^7.26.7": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.26.7.tgz#d0e33acd9223744c1e857dbd6fa17bd0a3786937" - integrity 
sha512-jfoTXXZTgGg36BmhqT3cAYK5qkmqvJpvNrPhaK/52Vgjhw4Rq29s9UqpWWV0D6yuRmgiFH/BUVlkl96zJWqnaw== +"@babel/plugin-transform-template-literals@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.27.1.tgz#1a0eb35d8bb3e6efc06c9fd40eb0bcef548328b8" + integrity sha512-fBJKiV7F2DxZUkg5EtHKXQdbsbURW3DZKQUWphDum0uRP6eHGGa/He9mc0mypL680pb+e/lDIthRohlv8NCHkg== dependencies: - "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-typescript@^7.25.9": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.26.7.tgz#64339515ea3eff610160f62499c3ef437d0ac83d" - integrity sha512-5cJurntg+AT+cgelGP9Bt788DKiAw9gIMSMU2NJrLAilnj0m8WZWUNZPSLOmadYsujHutpgElO+50foX+ib/Wg== +"@babel/plugin-transform-typeof-symbol@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.1.tgz#70e966bb492e03509cf37eafa6dcc3051f844369" + integrity sha512-RiSILC+nRJM7FY5srIyc4/fGIwUhyDuuBSdWn4y6yT6gm652DpCHZjIipgn6B7MQ1ITOUnAKWixEUjQRIBIcLw== dependencies: - "@babel/helper-annotate-as-pure" "^7.25.9" - "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.26.5" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/plugin-syntax-typescript" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-unicode-escapes@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.25.9.tgz#a75ef3947ce15363fccaa38e2dd9bc70b2788b82" - integrity sha512-s5EDrE6bW97LtxOcGj1Khcx5AaXwiMmi4toFWRDP9/y0Woo6pXC+iyPu/KuhKtfSrNFd7jJB+/fkOtZy6aIC6Q== +"@babel/plugin-transform-typescript@^7.27.1": + version "7.28.0" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.28.0.tgz#796cbd249ab56c18168b49e3e1d341b72af04a6b" + integrity sha512-4AEiDEBPIZvLQaWlc9liCavE0xRM0dNca41WtBeM3jgFptfUOSG9z0uteLhq6+3rq+WB6jIvUwKDTpXEHPJ2Vg== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-annotate-as-pure" "^7.27.3" + "@babel/helper-create-class-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-skip-transparent-expression-wrappers" "^7.27.1" + "@babel/plugin-syntax-typescript" "^7.27.1" -"@babel/plugin-transform-unicode-property-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.25.9.tgz#a901e96f2c1d071b0d1bb5dc0d3c880ce8f53dd3" - integrity sha512-Jt2d8Ga+QwRluxRQ307Vlxa6dMrYEMZCgGxoPR8V52rxPyldHu3hdlHspxaqYmE7oID5+kB+UKUB/eWS+DkkWg== +"@babel/plugin-transform-unicode-escapes@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.27.1.tgz#3e3143f8438aef842de28816ece58780190cf806" + integrity sha512-Ysg4v6AmF26k9vpfFuTZg8HRfVWzsh1kVfowA23y9j/Gu6dOuahdUVhkLqpObp3JIv27MLSii6noRnuKN8H0Mg== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-unicode-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.25.9.tgz#5eae747fe39eacf13a8bd006a4fb0b5d1fa5e9b1" - integrity sha512-yoxstj7Rg9dlNn9UQxzk4fcNivwv4nUYz7fYXBaKxvw/lnmPuOm/ikoELygbYq68Bls3D/D+NBPHiLwZdZZ4HA== +"@babel/plugin-transform-unicode-property-regex@^7.27.1": + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.27.1.tgz#bdfe2d3170c78c5691a3c3be934c8c0087525956" + integrity sha512-uW20S39PnaTImxp39O5qFlHLS9LJEmANjMG7SxIhap8rCHqu0Ik+tLEPX5DKmHn6CsWQ7j3lix2tFOa5YtL12Q== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/plugin-transform-unicode-sets-regex@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.9.tgz#65114c17b4ffc20fa5b163c63c70c0d25621fabe" - integrity sha512-8BYqO3GeVNHtx69fdPshN3fnzUNLrWdHhk/icSwigksJGczKSizZ+Z6SBCxTs723Fr5VSNorTIK7a+R2tISvwQ== +"@babel/plugin-transform-unicode-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.27.1.tgz#25948f5c395db15f609028e370667ed8bae9af97" + integrity sha512-xvINq24TRojDuyt6JGtHmkVkrfVV3FPT16uytxImLeBZqW3/H52yN+kM1MGuyPkIQxrzKwPHs5U/MP3qKyzkGw== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" + +"@babel/plugin-transform-unicode-sets-regex@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.27.1.tgz#6ab706d10f801b5c72da8bb2548561fa04193cd1" + integrity sha512-EtkOujbc4cgvb0mlpQefi4NTPBzhSIevblFevACNLUspmrALgmEBdL/XfnyyITfd8fKBZrZys92zOWcik7j9Tw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.27.1" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/preset-env@^7.9.5": - version "7.26.7" - resolved 
"https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.26.7.tgz#24d38e211f4570b8d806337035cc3ae798e0c36d" - integrity sha512-Ycg2tnXwixaXOVb29rana8HNPgLVBof8qqtNQ9LE22IoyZboQbGSxI6ZySMdW3K5nAe6gu35IaJefUJflhUFTQ== - dependencies: - "@babel/compat-data" "^7.26.5" - "@babel/helper-compilation-targets" "^7.26.5" - "@babel/helper-plugin-utils" "^7.26.5" - "@babel/helper-validator-option" "^7.25.9" - "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.25.9" - "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.25.9" - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.25.9" - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.25.9" - "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.25.9" + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.28.0.tgz#d23a6bc17b43227d11db77081a0779c706b5569c" + integrity sha512-VmaxeGOwuDqzLl5JUkIRM1X2Qu2uKGxHEQWh+cvvbl7JuJRgKGJSfsEF/bUaxFhJl/XAyxBe7q7qSuTbKFuCyg== + dependencies: + "@babel/compat-data" "^7.28.0" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-validator-option" "^7.27.1" + "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.27.1" + "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.27.1" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.27.1" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.27.1" + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.27.1" "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2" - "@babel/plugin-syntax-import-assertions" "^7.26.0" - "@babel/plugin-syntax-import-attributes" "^7.26.0" + "@babel/plugin-syntax-import-assertions" "^7.27.1" + "@babel/plugin-syntax-import-attributes" "^7.27.1" "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6" - 
"@babel/plugin-transform-arrow-functions" "^7.25.9" - "@babel/plugin-transform-async-generator-functions" "^7.25.9" - "@babel/plugin-transform-async-to-generator" "^7.25.9" - "@babel/plugin-transform-block-scoped-functions" "^7.26.5" - "@babel/plugin-transform-block-scoping" "^7.25.9" - "@babel/plugin-transform-class-properties" "^7.25.9" - "@babel/plugin-transform-class-static-block" "^7.26.0" - "@babel/plugin-transform-classes" "^7.25.9" - "@babel/plugin-transform-computed-properties" "^7.25.9" - "@babel/plugin-transform-destructuring" "^7.25.9" - "@babel/plugin-transform-dotall-regex" "^7.25.9" - "@babel/plugin-transform-duplicate-keys" "^7.25.9" - "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.25.9" - "@babel/plugin-transform-dynamic-import" "^7.25.9" - "@babel/plugin-transform-exponentiation-operator" "^7.26.3" - "@babel/plugin-transform-export-namespace-from" "^7.25.9" - "@babel/plugin-transform-for-of" "^7.25.9" - "@babel/plugin-transform-function-name" "^7.25.9" - "@babel/plugin-transform-json-strings" "^7.25.9" - "@babel/plugin-transform-literals" "^7.25.9" - "@babel/plugin-transform-logical-assignment-operators" "^7.25.9" - "@babel/plugin-transform-member-expression-literals" "^7.25.9" - "@babel/plugin-transform-modules-amd" "^7.25.9" - "@babel/plugin-transform-modules-commonjs" "^7.26.3" - "@babel/plugin-transform-modules-systemjs" "^7.25.9" - "@babel/plugin-transform-modules-umd" "^7.25.9" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.25.9" - "@babel/plugin-transform-new-target" "^7.25.9" - "@babel/plugin-transform-nullish-coalescing-operator" "^7.26.6" - "@babel/plugin-transform-numeric-separator" "^7.25.9" - "@babel/plugin-transform-object-rest-spread" "^7.25.9" - "@babel/plugin-transform-object-super" "^7.25.9" - "@babel/plugin-transform-optional-catch-binding" "^7.25.9" - "@babel/plugin-transform-optional-chaining" "^7.25.9" - "@babel/plugin-transform-parameters" "^7.25.9" - 
"@babel/plugin-transform-private-methods" "^7.25.9" - "@babel/plugin-transform-private-property-in-object" "^7.25.9" - "@babel/plugin-transform-property-literals" "^7.25.9" - "@babel/plugin-transform-regenerator" "^7.25.9" - "@babel/plugin-transform-regexp-modifiers" "^7.26.0" - "@babel/plugin-transform-reserved-words" "^7.25.9" - "@babel/plugin-transform-shorthand-properties" "^7.25.9" - "@babel/plugin-transform-spread" "^7.25.9" - "@babel/plugin-transform-sticky-regex" "^7.25.9" - "@babel/plugin-transform-template-literals" "^7.25.9" - "@babel/plugin-transform-typeof-symbol" "^7.26.7" - "@babel/plugin-transform-unicode-escapes" "^7.25.9" - "@babel/plugin-transform-unicode-property-regex" "^7.25.9" - "@babel/plugin-transform-unicode-regex" "^7.25.9" - "@babel/plugin-transform-unicode-sets-regex" "^7.25.9" + "@babel/plugin-transform-arrow-functions" "^7.27.1" + "@babel/plugin-transform-async-generator-functions" "^7.28.0" + "@babel/plugin-transform-async-to-generator" "^7.27.1" + "@babel/plugin-transform-block-scoped-functions" "^7.27.1" + "@babel/plugin-transform-block-scoping" "^7.28.0" + "@babel/plugin-transform-class-properties" "^7.27.1" + "@babel/plugin-transform-class-static-block" "^7.27.1" + "@babel/plugin-transform-classes" "^7.28.0" + "@babel/plugin-transform-computed-properties" "^7.27.1" + "@babel/plugin-transform-destructuring" "^7.28.0" + "@babel/plugin-transform-dotall-regex" "^7.27.1" + "@babel/plugin-transform-duplicate-keys" "^7.27.1" + "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.27.1" + "@babel/plugin-transform-dynamic-import" "^7.27.1" + "@babel/plugin-transform-explicit-resource-management" "^7.28.0" + "@babel/plugin-transform-exponentiation-operator" "^7.27.1" + "@babel/plugin-transform-export-namespace-from" "^7.27.1" + "@babel/plugin-transform-for-of" "^7.27.1" + "@babel/plugin-transform-function-name" "^7.27.1" + "@babel/plugin-transform-json-strings" "^7.27.1" + "@babel/plugin-transform-literals" "^7.27.1" + 
"@babel/plugin-transform-logical-assignment-operators" "^7.27.1" + "@babel/plugin-transform-member-expression-literals" "^7.27.1" + "@babel/plugin-transform-modules-amd" "^7.27.1" + "@babel/plugin-transform-modules-commonjs" "^7.27.1" + "@babel/plugin-transform-modules-systemjs" "^7.27.1" + "@babel/plugin-transform-modules-umd" "^7.27.1" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.27.1" + "@babel/plugin-transform-new-target" "^7.27.1" + "@babel/plugin-transform-nullish-coalescing-operator" "^7.27.1" + "@babel/plugin-transform-numeric-separator" "^7.27.1" + "@babel/plugin-transform-object-rest-spread" "^7.28.0" + "@babel/plugin-transform-object-super" "^7.27.1" + "@babel/plugin-transform-optional-catch-binding" "^7.27.1" + "@babel/plugin-transform-optional-chaining" "^7.27.1" + "@babel/plugin-transform-parameters" "^7.27.7" + "@babel/plugin-transform-private-methods" "^7.27.1" + "@babel/plugin-transform-private-property-in-object" "^7.27.1" + "@babel/plugin-transform-property-literals" "^7.27.1" + "@babel/plugin-transform-regenerator" "^7.28.0" + "@babel/plugin-transform-regexp-modifiers" "^7.27.1" + "@babel/plugin-transform-reserved-words" "^7.27.1" + "@babel/plugin-transform-shorthand-properties" "^7.27.1" + "@babel/plugin-transform-spread" "^7.27.1" + "@babel/plugin-transform-sticky-regex" "^7.27.1" + "@babel/plugin-transform-template-literals" "^7.27.1" + "@babel/plugin-transform-typeof-symbol" "^7.27.1" + "@babel/plugin-transform-unicode-escapes" "^7.27.1" + "@babel/plugin-transform-unicode-property-regex" "^7.27.1" + "@babel/plugin-transform-unicode-regex" "^7.27.1" + "@babel/plugin-transform-unicode-sets-regex" "^7.27.1" "@babel/preset-modules" "0.1.6-no-external-plugins" - babel-plugin-polyfill-corejs2 "^0.4.10" - babel-plugin-polyfill-corejs3 "^0.10.6" - babel-plugin-polyfill-regenerator "^0.6.1" - core-js-compat "^3.38.1" + babel-plugin-polyfill-corejs2 "^0.4.14" + babel-plugin-polyfill-corejs3 "^0.13.0" + babel-plugin-polyfill-regenerator 
"^0.6.5" + core-js-compat "^3.43.0" semver "^6.3.1" "@babel/preset-modules@0.1.6-no-external-plugins": @@ -870,52 +886,45 @@ esutils "^2.0.2" "@babel/preset-typescript@^7.9.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.26.0.tgz#4a570f1b8d104a242d923957ffa1eaff142a106d" - integrity sha512-NMk1IGZ5I/oHhoXEElcm+xUnL/szL6xflkFZmoEU9xj1qSJXpiS7rsspYo92B4DRCDvZn2erT5LdsCeXAKNCkg== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-validator-option" "^7.25.9" - "@babel/plugin-syntax-jsx" "^7.25.9" - "@babel/plugin-transform-modules-commonjs" "^7.25.9" - "@babel/plugin-transform-typescript" "^7.25.9" - -"@babel/runtime@^7.8.4": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.7.tgz#f4e7fe527cd710f8dc0618610b61b4b060c3c341" - integrity sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ== - dependencies: - regenerator-runtime "^0.14.0" - -"@babel/template@^7.25.9", "@babel/template@^7.3.3": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.9.tgz#ecb62d81a8a6f5dc5fe8abfc3901fc52ddf15016" - integrity sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg== - dependencies: - "@babel/code-frame" "^7.25.9" - "@babel/parser" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/traverse@^7.25.9", "@babel/traverse@^7.26.5", "@babel/traverse@^7.26.7", "@babel/traverse@^7.7.2": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.26.7.tgz#99a0a136f6a75e7fb8b0a1ace421e0b25994b8bb" - integrity sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA== - dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.26.5" - "@babel/parser" "^7.26.7" - "@babel/template" "^7.25.9" - "@babel/types" "^7.26.7" + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.27.1.tgz#190742a6428d282306648a55b0529b561484f912" + integrity sha512-l7WfQfX0WK4M0v2RudjuQK4u99BS6yLHYEmdtVPP7lKV013zr9DygFuWNlnbvQ9LR+LS0Egz/XAvGx5U9MX0fQ== + dependencies: + "@babel/helper-plugin-utils" "^7.27.1" + "@babel/helper-validator-option" "^7.27.1" + "@babel/plugin-syntax-jsx" "^7.27.1" + "@babel/plugin-transform-modules-commonjs" "^7.27.1" + "@babel/plugin-transform-typescript" "^7.27.1" + +"@babel/template@^7.27.1", "@babel/template@^7.27.2", "@babel/template@^7.3.3": + version "7.27.2" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.27.2.tgz#fa78ceed3c4e7b63ebf6cb39e5852fca45f6809d" + integrity sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/parser" "^7.27.2" + "@babel/types" "^7.27.1" + +"@babel/traverse@^7.27.1", "@babel/traverse@^7.27.3", "@babel/traverse@^7.28.0", "@babel/traverse@^7.7.2": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.28.0.tgz#518aa113359b062042379e333db18380b537e34b" + integrity sha512-mGe7UK5wWyh0bKRfupsUchrQGqvDbZDbKJw+kcRGSmdHVYrv+ltd0pnpDTVpiTqnaBru9iEvA8pz8W46v0Amwg== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.28.0" + "@babel/helper-globals" "^7.28.0" + "@babel/parser" "^7.28.0" + "@babel/template" "^7.27.2" + "@babel/types" "^7.28.0" debug "^4.3.1" - globals "^11.1.0" -"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.25.9", "@babel/types@^7.26.5", "@babel/types@^7.26.7", "@babel/types@^7.3.3", "@babel/types@^7.4.4": - version "7.26.7" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.7.tgz#5e2b89c0768e874d4d061961f3a5a153d71dc17a" - integrity sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg== +"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.27.1", 
"@babel/types@^7.27.3", "@babel/types@^7.28.0", "@babel/types@^7.28.2", "@babel/types@^7.3.3", "@babel/types@^7.4.4": + version "7.28.2" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.28.2.tgz#da9db0856a9a88e0a13b019881d7513588cf712b" + integrity sha512-ruv7Ae4J5dUYULmeXw1gmb7rYRz57OWCPM57pHojnLq/3Z1CK2lNSLTCVjxVk1F/TZHwOZZrOWi0ur95BbLxNQ== dependencies: - "@babel/helper-string-parser" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-string-parser" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" "@bcoe/v8-coverage@^0.2.3": version "0.2.3" @@ -1107,13 +1116,12 @@ "@types/yargs" "^16.0.0" chalk "^4.0.0" -"@jridgewell/gen-mapping@^0.3.5": - version "0.3.8" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz#4f0e06362e01362f823d348f1872b08f666d8142" - integrity sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA== +"@jridgewell/gen-mapping@^0.3.12", "@jridgewell/gen-mapping@^0.3.5": + version "0.3.12" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.12.tgz#2234ce26c62889f03db3d7fea43c1932ab3e927b" + integrity sha512-OuLGC46TjB5BbN1dH8JULVVZY4WTdkF7tV9Ys6wLL1rubZnCMstOhNHueU5bLCrnRuDhKPDM4g6sw4Bel5Gzqg== dependencies: - "@jridgewell/set-array" "^1.2.1" - "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/sourcemap-codec" "^1.5.0" "@jridgewell/trace-mapping" "^0.3.24" "@jridgewell/resolve-uri@^3.1.0": @@ -1121,20 +1129,15 @@ resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== -"@jridgewell/set-array@^1.2.1": - version "1.2.1" - resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" - integrity 
sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== - -"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": - version "1.5.0" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" - integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== +"@jridgewell/sourcemap-codec@^1.4.14", "@jridgewell/sourcemap-codec@^1.5.0": + version "1.5.4" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.4.tgz#7358043433b2e5da569aa02cbc4c121da3af27d7" + integrity sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw== -"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": - version "0.3.25" - resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" - integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== +"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.28": + version "0.3.29" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.29.tgz#a58d31eaadaf92c6695680b2e1d464a9b8fbf7fc" + integrity sha512-uw6guiW/gcAGPDhLmd77/6lW8QLeiV5RUTsAX46Db6oLhGaVj4lhnPwb184s1bkc8kdVg/+h988dro8GRDpmYQ== dependencies: "@jridgewell/resolve-uri" "^3.1.0" "@jridgewell/sourcemap-codec" "^1.4.14" @@ -1170,9 +1173,9 @@ "@types/babel__traverse" "*" "@types/babel__generator@*": - version "7.6.8" - resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.8.tgz#f836c61f48b1346e7d2b0d93c6dacc5b9535d3ab" - integrity sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw== + version "7.27.0" + resolved 
"https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.27.0.tgz#b5819294c51179957afaec341442f9341e4108a9" + integrity sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg== dependencies: "@babel/types" "^7.0.0" @@ -1185,9 +1188,9 @@ "@babel/types" "^7.0.0" "@types/babel__traverse@*", "@types/babel__traverse@^7.0.4", "@types/babel__traverse@^7.0.6": - version "7.20.6" - resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.6.tgz#8dc9f0ae0f202c08d8d4dab648912c8d6038e3f7" - integrity sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg== + version "7.20.7" + resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.7.tgz#968cdc2366ec3da159f61166428ee40f370e56c2" + integrity sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng== dependencies: "@babel/types" "^7.20.7" @@ -1226,19 +1229,19 @@ pretty-format "^27.0.0" "@types/node@*": - version "22.13.1" - resolved "https://registry.yarnpkg.com/@types/node/-/node-22.13.1.tgz#a2a3fefbdeb7ba6b89f40371842162fac0934f33" - integrity sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew== + version "24.1.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-24.1.0.tgz#0993f7dc31ab5cc402d112315b463e383d68a49c" + integrity sha512-ut5FthK5moxFKH2T1CUOC6ctR67rQRvvHdFLCD2Ql6KXmMuCrjsSsRI9UsLCm9M18BMwClv4pn327UvB7eeO1w== dependencies: - undici-types "~6.20.0" + undici-types "~7.8.0" -"@types/pg@^7.14.3": - version "7.14.11" - resolved "https://registry.yarnpkg.com/@types/pg/-/pg-7.14.11.tgz#daf5555504a1f7af4263df265d91f140fece52e3" - integrity sha512-EnZkZ1OMw9DvNfQkn2MTJrwKmhJYDEs5ujWrPfvseWNoI95N8B4HzU/Ltrq5ZfYxDX/Zg8mTzwr6UAyTjjFvXA== +"@types/pg@^8.15.5": + version "8.15.5" + resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.15.5.tgz#ef43e0f33b62dac95cae2f042888ec7980b30c09" + 
integrity sha512-LF7lF6zWEKxuT3/OR8wAZGzkg4ENGXFNyiV/JeOt9z5B+0ZVwbql9McqX5c/WStFq1GaGso7H1AzP/qSzmlCKQ== dependencies: "@types/node" "*" - pg-protocol "^1.2.0" + pg-protocol "*" pg-types "^2.2.0" "@types/prettier@^2.1.5": @@ -1287,9 +1290,9 @@ acorn@^7.1.1: integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== acorn@^8.2.4: - version "8.14.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.14.0.tgz#063e2c70cac5fb4f6467f0b11152e04c682795b0" - integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== + version "8.15.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.15.0.tgz#a360898bc415edaac46c8241f6383975b930b816" + integrity sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== agent-base@6: version "6.0.2" @@ -1377,34 +1380,34 @@ babel-plugin-jest-hoist@^27.5.1: "@types/babel__core" "^7.0.0" "@types/babel__traverse" "^7.0.6" -babel-plugin-polyfill-corejs2@^0.4.10: - version "0.4.12" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.12.tgz#ca55bbec8ab0edeeef3d7b8ffd75322e210879a9" - integrity sha512-CPWT6BwvhrTO2d8QVorhTCQw9Y43zOu7G9HigcfxvepOU6b8o3tcWad6oVgZIsZCTt42FFv97aA7ZJsbM4+8og== +babel-plugin-polyfill-corejs2@^0.4.14: + version "0.4.14" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.14.tgz#8101b82b769c568835611542488d463395c2ef8f" + integrity sha512-Co2Y9wX854ts6U8gAAPXfn0GmAyctHuK8n0Yhfjd6t30g7yvKjspvvOo9yG+z52PZRgFErt7Ka2pYnXCjLKEpg== dependencies: - "@babel/compat-data" "^7.22.6" - "@babel/helper-define-polyfill-provider" "^0.6.3" + "@babel/compat-data" "^7.27.7" + "@babel/helper-define-polyfill-provider" "^0.6.5" semver "^6.3.1" -babel-plugin-polyfill-corejs3@^0.10.6: - version "0.10.6" - resolved 
"https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz#2deda57caef50f59c525aeb4964d3b2f867710c7" - integrity sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA== +babel-plugin-polyfill-corejs3@^0.13.0: + version "0.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.13.0.tgz#bb7f6aeef7addff17f7602a08a6d19a128c30164" + integrity sha512-U+GNwMdSFgzVmfhNm8GJUX88AadB3uo9KpJqS3FaqNIPKgySuvMb+bHPsOmmuWyIcuqZj/pzt1RUIUZns4y2+A== dependencies: - "@babel/helper-define-polyfill-provider" "^0.6.2" - core-js-compat "^3.38.0" + "@babel/helper-define-polyfill-provider" "^0.6.5" + core-js-compat "^3.43.0" -babel-plugin-polyfill-regenerator@^0.6.1: - version "0.6.3" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.3.tgz#abeb1f3f1c762eace37587f42548b08b57789bc8" - integrity sha512-LiWSbl4CRSIa5x/JAU6jZiG9eit9w6mz+yVMFwDE83LAWvt0AfGBoZ7HS/mkhrKuh2ZlzfVZYKoLjXdqw6Yt7Q== +babel-plugin-polyfill-regenerator@^0.6.5: + version "0.6.5" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.5.tgz#32752e38ab6f6767b92650347bf26a31b16ae8c5" + integrity sha512-ISqQ2frbiNU9vIJkzg7dlPpznPZ4jOiUQ1uSmB0fEHeowtN3COYRsXr/xexn64NpU13P06jc/L5TgiJXOgrbEg== dependencies: - "@babel/helper-define-polyfill-provider" "^0.6.3" + "@babel/helper-define-polyfill-provider" "^0.6.5" babel-preset-current-node-syntax@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz#9a929eafece419612ef4ae4f60b1862ebad8ef30" - integrity sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw== + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz#20730d6cdc7dda5d89401cab10ac6a32067acde6" + integrity sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg== dependencies: "@babel/plugin-syntax-async-generators" "^7.8.4" "@babel/plugin-syntax-bigint" "^7.8.3" @@ -1436,9 +1439,9 @@ balanced-match@^1.0.0: integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + version "1.1.12" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.12.tgz#ab9b454466e5a8cc3a187beaad580412a9c5b843" + integrity sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== dependencies: balanced-match "^1.0.0" concat-map "0.0.1" @@ -1455,15 +1458,15 @@ browser-process-hrtime@^1.0.0: resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626" integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow== -browserslist@^4.24.0, browserslist@^4.24.3: - version "4.24.4" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.4.tgz#c6b2865a3f08bcb860a0e827389003b9fe686e4b" - integrity sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A== +browserslist@^4.24.0, browserslist@^4.25.1: + version "4.25.1" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.25.1.tgz#ba9e8e6f298a1d86f829c9b975e07948967bb111" + integrity sha512-KGj0KoOMXLpSNkkEI6Z6mShmQy0bc1I+T7K9N81k4WWMrfz+6fQ6es80B/YLAeRoKvjYE1YSHHOW1qe9xIVzHw== 
dependencies: - caniuse-lite "^1.0.30001688" - electron-to-chromium "^1.5.73" + caniuse-lite "^1.0.30001726" + electron-to-chromium "^1.5.173" node-releases "^2.0.19" - update-browserslist-db "^1.1.1" + update-browserslist-db "^1.1.3" bser@2.1.1: version "2.1.1" @@ -1477,6 +1480,14 @@ buffer-from@^1.0.0: resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== +call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + callsites@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" @@ -1492,10 +1503,10 @@ camelcase@^6.2.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== -caniuse-lite@^1.0.30001688: - version "1.0.30001698" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001698.tgz#3e86d4bad6f87f493197fb2483a211fe8841abd3" - integrity sha512-xJ3km2oiG/MbNU8G6zIq6XRZ6HtAOVXsbOrP/blGazi52kc5Yy7b6sDA5O+FbROzRrV7BSTllLHuNvmawYUJjw== +caniuse-lite@^1.0.30001726: + version "1.0.30001731" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001731.tgz#277c07416ea4613ec564e5b0ffb47e7b60f32e2f" + integrity sha512-lDdp2/wrOmTRWuoB5DpfNkC0rJDU8DqRa6nYL6HK6sytw70QMopt/NIc/9SM7ylItlBWfACXk0tEn37UWM/+mg== chalk@^4.0.0: version "4.1.2" @@ -1573,12 +1584,12 @@ convert-source-map@^2.0.0: 
resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== -core-js-compat@^3.38.0, core-js-compat@^3.38.1: - version "3.40.0" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.40.0.tgz#7485912a5a4a4315c2fdb2cbdc623e6881c88b38" - integrity sha512-0XEDpr5y5mijvw8Lbc6E5AkjrHfp7eEoPlu36SWeAbcL8fn1G1ANe8DBlo2XoNN89oVpxWwOjYIPVzR4ZvsKCQ== +core-js-compat@^3.43.0: + version "3.44.0" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.44.0.tgz#62b9165b97e4cbdb8bca16b14818e67428b4a0f8" + integrity sha512-JepmAj2zfl6ogy34qfWtcE7nHKAJnKsQFRn++scjVS2bZFllwptzw61BZcZFYBPpUznLfAvh0LGhxKppk04ClA== dependencies: - browserslist "^4.24.3" + browserslist "^4.25.1" cross-spawn@^7.0.3: version "7.0.6" @@ -1615,17 +1626,17 @@ data-urls@^2.0.0: whatwg-mimetype "^2.3.0" whatwg-url "^8.0.0" -debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: - version "4.4.0" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" - integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== +debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.4.1: + version "4.4.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.1.tgz#e5a8bc6cbc4c6cd3e64308b0693a3d4fa550189b" + integrity sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ== dependencies: ms "^2.1.3" decimal.js@^10.2.1: - version "10.5.0" - resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.5.0.tgz#0f371c7cf6c4898ce0afb09836db73cd82010f22" - integrity sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw== + version "10.6.0" + resolved 
"https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.6.0.tgz#e649a43e3ab953a72192ff5983865e509f37ed9a" + integrity sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg== dedent@^0.7.0: version "0.7.0" @@ -1659,10 +1670,19 @@ domexception@^2.0.1: dependencies: webidl-conversions "^5.0.0" -electron-to-chromium@^1.5.73: - version "1.5.95" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.95.tgz#c7819461af3cc64f467bd10bf020516e20682a2a" - integrity sha512-XNsZaQrgQX+BG37BRQv+E+HcOZlWhqYaDoVVNCws/WrYYdbGrkR1qCDJ2mviBF3flCs6/BTa4O7ANfFTFZk6Dg== +dunder-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== + dependencies: + call-bind-apply-helpers "^1.0.1" + es-errors "^1.3.0" + gopd "^1.2.0" + +electron-to-chromium@^1.5.173: + version "1.5.192" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.192.tgz#6dfc57a41846a57b18f9c0121821a6df1e165cc1" + integrity sha512-rP8Ez0w7UNw/9j5eSXCe10o1g/8B1P5SM90PCCMVkIRQn2R0LEHWz4Eh9RnxkniuDe1W0cTSOB3MLlkTGDcuCg== emittery@^0.8.1: version "0.8.1" @@ -1681,6 +1701,33 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" +es-define-property@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + 
version "1.1.1" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== + dependencies: + es-errors "^1.3.0" + +es-set-tostringtag@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== + dependencies: + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + escalade@^3.1.1, escalade@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5" @@ -1775,13 +1822,15 @@ find-up@^4.0.0, find-up@^4.1.0: path-exists "^4.0.0" form-data@^3.0.0: - version "3.0.2" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.2.tgz#83ad9ced7c03feaad97e293d6f6091011e1659c8" - integrity sha512-sJe+TQb2vIaIyO783qN6BlMYWMw3WBOHA1Ay2qxsnjuafEOQFJ2JakedOQirT6D5XPRxDvS7AHYyem9fTpb4LQ== + version "3.0.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.4.tgz#938273171d3f999286a4557528ce022dc2c98df1" + integrity sha512-f0cRzm6dkyVYV3nPoooP8XlccPQukegwhAnpoLcXy+X+A8KfpGOoXwDr9FLZd3wzgLaBGQBE3lY93Zm/i1JvIQ== dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" - mime-types "^2.1.12" + es-set-tostringtag "^2.1.0" + hasown "^2.0.2" + mime-types "^2.1.35" fs.realpath@^1.0.0: version "1.0.0" @@ -1808,11 +1857,35 @@ get-caller-file@^2.0.5: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== +get-intrinsic@^1.2.6: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== + dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.1.1" + function-bind "^1.1.2" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" + get-package-type@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== +get-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + get-stream@^6.0.0: version "6.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" @@ -1830,10 +1903,10 @@ glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4: once "^1.3.0" path-is-absolute "^1.0.0" -globals@^11.1.0: - version "11.12.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" - integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== +gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== graceful-fs@^4.2.9: version "4.2.11" @@ -1845,6 +1918,18 @@ has-flag@^4.0.0: resolved 
"https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== +has-symbols@^1.0.3, has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== + +has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + hasown@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" @@ -2532,6 +2617,11 @@ makeerror@1.0.12: dependencies: tmpl "1.0.5" +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" @@ -2550,7 +2640,7 @@ mime-db@1.52.0: resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== -mime-types@^2.1.12: +mime-types@^2.1.35: version "2.1.35" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== @@ 
-2602,9 +2692,9 @@ npm-run-path@^4.0.1: path-key "^3.0.0" nwsapi@^2.2.0: - version "2.2.16" - resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.16.tgz#177760bba02c351df1d2644e220c31dfec8cdb43" - integrity sha512-F1I/bimDpj3ncaNDhfyMWuFqmQDBwDB0Fogc2qpL3BWvkQteFD/8BzWuIRl83rq0DXfm8SGt/HFhLXZyljTXcQ== + version "2.2.21" + resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.21.tgz#8df7797079350adda208910d8c33fc4c2d7520c3" + integrity sha512-o6nIY3qwiSXl7/LuOU0Dmuctd34Yay0yeuZRLFmDPrrdHpXKFndPj3hM+YEPVHYC5fx2otBx4Ilc/gyYSAUaIA== once@^1.3.0: version "1.4.0" @@ -2674,44 +2764,44 @@ path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -pg-cloudflare@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz#e6d5833015b170e23ae819e8c5d7eaedb472ca98" - integrity sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q== +pg-cloudflare@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.2.7.tgz#a1f3d226bab2c45ae75ea54d65ec05ac6cfafbef" + integrity sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg== -pg-connection-string@^2.7.0: - version "2.7.0" - resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.7.0.tgz#f1d3489e427c62ece022dba98d5262efcb168b37" - integrity sha512-PI2W9mv53rXJQEOb8xNR8lH7Hr+EKa6oJa38zsK0S/ky2er16ios1wLKhZyxzD7jUReiWokc9WK5nxSnC7W1TA== +pg-connection-string@^2.9.1: + version "2.9.1" + resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.9.1.tgz#bb1fd0011e2eb76ac17360dc8fa183b2d3465238" + integrity sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w== -pg-cursor@^2.12.1: - version 
"2.12.1" - resolved "https://registry.yarnpkg.com/pg-cursor/-/pg-cursor-2.12.1.tgz#3df93ccd91b52d5baab22d028b92e862c738b978" - integrity sha512-V13tEaA9Oq1w+V6Q3UBIB/blxJrwbbr35/dY54r/86soBJ7xkP236bXaORUTVXUPt9B6Ql2BQu+uwQiuMfRVgg== +pg-cursor@^2.15.3: + version "2.15.3" + resolved "https://registry.yarnpkg.com/pg-cursor/-/pg-cursor-2.15.3.tgz#19f05739ff95366eed28e80191a6321d0e036395" + integrity sha512-eHw63TsiGtFEfAd7tOTZ+TLy+i/2ePKS20H84qCQ+aQ60pve05Okon9tKMC+YN3j6XyeFoHnaim7Lt9WVafQsA== pg-int8@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/pg-int8/-/pg-int8-1.0.1.tgz#943bd463bf5b71b4170115f80f8efc9a0c0eb78c" integrity sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw== -pg-pool@^3.7.0: - version "3.7.0" - resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.7.0.tgz#d4d3c7ad640f8c6a2245adc369bafde4ebb8cbec" - integrity sha512-ZOBQForurqh4zZWjrgSwwAtzJ7QiRX0ovFkZr2klsen3Nm0aoh33Ls0fzfv3imeH/nw/O27cjdz5kzYJfeGp/g== +pg-pool@^3.10.1: + version "3.10.1" + resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.10.1.tgz#481047c720be2d624792100cac1816f8850d31b2" + integrity sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg== -pg-protocol@^1.2.0, pg-protocol@^1.7.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.7.0.tgz#ec037c87c20515372692edac8b63cf4405448a93" - integrity sha512-hTK/mE36i8fDDhgDFjy6xNOG+LCorxLG3WO17tku+ij6sVHXh1jQUJ8hYAnRhNla4QVD2H8er/FOjc/+EgC6yQ== +pg-protocol@*, pg-protocol@^1.10.3: + version "1.10.3" + resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.10.3.tgz#ac9e4778ad3f84d0c5670583bab976ea0a34f69f" + integrity sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ== pg-query-stream@^4.5.3: - version "4.7.1" - resolved "https://registry.yarnpkg.com/pg-query-stream/-/pg-query-stream-4.7.1.tgz#9938dac9a2dd6696ac58a429e312054201765069" - 
integrity sha512-UMgsgn/pOIYsIifRySp59vwlpTpLADMK9HWJtq5ff0Z3MxBnPMGnCQeaQl5VuL+7ov4F96mSzIRIcz+Duo6OiQ== + version "4.10.3" + resolved "https://registry.yarnpkg.com/pg-query-stream/-/pg-query-stream-4.10.3.tgz#ed4461c76a1115a36581614ed1897ef4ecee375a" + integrity sha512-h2utrzpOIzeT9JfaqfvBbVuvCfBjH86jNfVrGGTbyepKAIOyTfDew0lAt8bbJjs9n/I5bGDl7S2sx6h5hPyJxw== dependencies: - pg-cursor "^2.12.1" + pg-cursor "^2.15.3" -pg-types@^2.1.0, pg-types@^2.2.0: +pg-types@2.2.0, pg-types@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/pg-types/-/pg-types-2.2.0.tgz#2d0250d636454f7cfa3b6ae0382fdfa8063254a3" integrity sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA== @@ -2722,27 +2812,27 @@ pg-types@^2.1.0, pg-types@^2.2.0: postgres-date "~1.0.4" postgres-interval "^1.1.0" -pg@^8.11.3: - version "8.13.1" - resolved "https://registry.yarnpkg.com/pg/-/pg-8.13.1.tgz#6498d8b0a87ff76c2df7a32160309d3168c0c080" - integrity sha512-OUir1A0rPNZlX//c7ksiu7crsGZTKSOXJPgtNiHGIlC9H0lO+NC6ZDYksSgBYY/thSWhnSRBv8w1lieNNGATNQ== +pg@^8.16.3: + version "8.16.3" + resolved "https://registry.yarnpkg.com/pg/-/pg-8.16.3.tgz#160741d0b44fdf64680e45374b06d632e86c99fd" + integrity sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw== dependencies: - pg-connection-string "^2.7.0" - pg-pool "^3.7.0" - pg-protocol "^1.7.0" - pg-types "^2.1.0" - pgpass "1.x" + pg-connection-string "^2.9.1" + pg-pool "^3.10.1" + pg-protocol "^1.10.3" + pg-types "2.2.0" + pgpass "1.0.5" optionalDependencies: - pg-cloudflare "^1.1.1" + pg-cloudflare "^1.2.7" -pgpass@1.x: +pgpass@1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/pgpass/-/pgpass-1.0.5.tgz#9b873e4a564bb10fa7a7dbd55312728d422a223d" integrity sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug== dependencies: split2 "^4.1.0" -picocolors@^1.0.0, picocolors@^1.1.1: +picocolors@^1.1.1: version "1.1.1" resolved 
"https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -2753,9 +2843,9 @@ picomatch@^2.0.4, picomatch@^2.2.3, picomatch@^2.3.1: integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== pirates@^4.0.4: - version "4.0.6" - resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.6.tgz#3018ae32ecfcff6c29ba2267cbf21166ac1f36b9" - integrity sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg== + version "4.0.7" + resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.7.tgz#643b4a18c4257c8a65104b73f3049ce9a0a15e22" + integrity sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA== pkg-dir@^4.2.0: version "4.2.0" @@ -2842,18 +2932,6 @@ regenerate@^1.4.2: resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-runtime@^0.14.0: - version "0.14.1" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" - integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== - -regenerator-transform@^0.15.2: - version "0.15.2" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" - integrity sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg== - dependencies: - "@babel/runtime" "^7.8.4" - regexpu-core@^6.2.0: version "6.2.0" resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.2.0.tgz#0e5190d79e542bf294955dccabae04d3c7d53826" @@ -2905,7 +2983,7 @@ 
resolve.exports@^1.1.0: resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-1.1.1.tgz#05cfd5b3edf641571fd46fa608b610dda9ead999" integrity sha512-/NtpHNDN7jWhAaQ9BvBUYZ6YTXsRBgfqWFWP7BZBaoMJO/I3G5OFzvTuWNlZC3aPjins1F+TNrLKsGbH4rfsRQ== -resolve@^1.14.2, resolve@^1.20.0: +resolve@^1.20.0, resolve@^1.22.10: version "1.22.10" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.10.tgz#b663e83ffb09bbf2386944736baae803029b8b39" integrity sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w== @@ -2939,9 +3017,9 @@ semver@^6.3.0, semver@^6.3.1: integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== semver@^7.3.2, semver@^7.5.3: - version "7.7.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.1.tgz#abd5098d82b18c6c81f6074ff2647fd3e7220c9f" - integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA== + version "7.7.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.2.tgz#67d99fdcd35cec21e6f8b87a7fd515a33f982b58" + integrity sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA== shebang-command@^2.0.0: version "2.0.0" @@ -2984,9 +3062,9 @@ source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.1: integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== source-map@^0.7.3: - version "0.7.4" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.4.tgz#a9bbe705c9d8846f4e08ff6765acf0f1b0898656" - integrity sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA== + version "0.7.6" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.6.tgz#a3658ab87e5b6429c8a1f3ba0083d4c61ca3ef02" + integrity sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ== split2@^4.1.0: version "4.2.0" @@ -3144,10 
+3222,10 @@ typedarray-to-buffer@^3.1.5: dependencies: is-typedarray "^1.0.0" -undici-types@~6.20.0: - version "6.20.0" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.20.0.tgz#8171bf22c1f588d1554d55bf204bc624af388433" - integrity sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg== +undici-types@~7.8.0: + version "7.8.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.8.0.tgz#de00b85b710c54122e44fbfd911f8d70174cd294" + integrity sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw== unicode-canonical-property-names-ecmascript@^2.0.0: version "2.0.1" @@ -3177,10 +3255,10 @@ universalify@^0.2.0: resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.2.0.tgz#6451760566fa857534745ab1dde952d1b1761be0" integrity sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg== -update-browserslist-db@^1.1.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.2.tgz#97e9c96ab0ae7bcac08e9ae5151d26e6bc6b5580" - integrity sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg== +update-browserslist-db@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz#348377dd245216f9e7060ff50b15a1b740b75420" + integrity sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw== dependencies: escalade "^3.2.0" picocolors "^1.1.1" diff --git a/test/lang/python/requirements.txt b/test/lang/python/requirements.txt index 8fd0bb38f6c87..4fa401860e44e 100644 --- a/test/lang/python/requirements.txt +++ b/test/lang/python/requirements.txt @@ -1,4 +1,4 @@ -psycopg==3.1.12 -psycopg-binary==3.1.12 -psycopg2==2.9.9 +psycopg==3.2.9 +psycopg-binary==3.2.9 +psycopg2==2.9.10 SQLAlchemy==1.3.20 diff --git a/test/lang/ruby/Gemfile 
b/test/lang/ruby/Gemfile index fc0c1fff8f834..dea833c207f77 100644 --- a/test/lang/ruby/Gemfile +++ b/test/lang/ruby/Gemfile @@ -1,3 +1,3 @@ source "https://rubygems.org" -gem "pg", "~> 1.5.4" +gem "pg", "~> 1.6.0" gem "test-unit", "~> 3.6.1" diff --git a/test/lang/ruby/Gemfile.lock b/test/lang/ruby/Gemfile.lock index 40c4585eceee7..d190b28716290 100644 --- a/test/lang/ruby/Gemfile.lock +++ b/test/lang/ruby/Gemfile.lock @@ -1,7 +1,9 @@ GEM remote: https://rubygems.org/ specs: - pg (1.5.4) + pg (1.6.0-aarch64-linux) + pg (1.6.0-arm64-darwin) + pg (1.6.0-x86_64-linux) power_assert (2.0.3) test-unit (3.6.1) power_assert @@ -9,9 +11,10 @@ GEM PLATFORMS aarch64-linux arm64-darwin-22 + x86_64-linux-gnu DEPENDENCIES - pg (~> 1.5.4) + pg (~> 1.6.0) test-unit (~> 3.6.1) BUNDLED WITH diff --git a/test/sql-server-cdc/11-sql-server-cdc-ssl.td b/test/sql-server-cdc/11-sql-server-cdc-ssl.td new file mode 100644 index 0000000000000..0a3cfd5155a06 --- /dev/null +++ b/test/sql-server-cdc/11-sql-server-cdc-ssl.td @@ -0,0 +1,132 @@ +# Copyright Materialize, Inc. and contributors. All rights reserved. +# +# Use of this software is governed by the Business Source License +# included in the LICENSE file at the root of this repository. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0. + +# Setup SQL Server state. + +$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr} +ALTER SYSTEM SET enable_sql_server_source = true; + +> CREATE SECRET ssl_ca AS '${arg.ssl-ca}' +> CREATE SECRET alt_ssl_ca AS '${arg.alt-ssl-ca}' +> CREATE SECRET IF NOT EXISTS sql_server_pass AS '${arg.default-sql-server-password}' + +# Create a table that has CDC enabled. 
+ +$ sql-server-connect name=sql-server +server=tcp:sql-server,1433;IntegratedSecurity=true;TrustServerCertificate=true;User ID=${arg.default-sql-server-user};Password=${arg.default-sql-server-password} + +$ sql-server-execute name=sql-server +DROP DATABASE IF EXISTS test_ssl; +CREATE DATABASE test_ssl; +USE test_ssl; + +EXEC sys.sp_cdc_enable_db; +ALTER DATABASE test_ssl SET ALLOW_SNAPSHOT_ISOLATION ON; + +CREATE TABLE t1_pk (key_col VARCHAR(20) PRIMARY KEY, val_col VARCHAR(1024)); +EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 't1_pk', @role_name = 'SA', @supports_net_changes = 0; + +INSERT INTO t1_pk VALUES ('a', 'hello world'), ('b', 'foobar'), ('c', 'anotha one'); + +CREATE TABLE t2_no_cdc (key_col VARCHAR(20) PRIMARY KEY, val_col VARCHAR(1024)); + +CREATE TABLE t3_text (value VARCHAR(100)); +EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 't3_text', @role_name = 'SA', @supports_net_changes = 0; + +# Exercise Materialize. + +# Test SSL MODE disabled. + +> CREATE CONNECTION no_ssl_connection TO SQL SERVER ( + HOST 'sql-server', + PORT 1433, + DATABASE test_ssl, + USER '${arg.default-sql-server-user}', + PASSWORD = SECRET sql_server_pass, + SSL MODE disabled + ); + +> VALIDATE CONNECTION no_ssl_connection; + +> SELECT name, type from mz_connections WHERE name = 'no_ssl_connection'; +name type +--------------------------------------- +no_ssl_connection sql-server + + +> DROP CONNECTION no_ssl_connection; + +# Test SSL MODE required. 
+> CREATE CONNECTION required_ssl_connection TO SQL SERVER ( + HOST 'sql-server', + PORT 1433, + DATABASE test_ssl, + USER '${arg.default-sql-server-user}', + PASSWORD = SECRET sql_server_pass, + SSL MODE required + ); + +> VALIDATE CONNECTION required_ssl_connection; + +> SELECT name, type from mz_connections WHERE name = 'required_ssl_connection'; +name type +--------------------------------------- +required_ssl_connection sql-server + +> DROP CONNECTION required_ssl_connection; + + +# Test SSL MODE verify_ca. +# verify_ca requires a CA +! CREATE CONNECTION missing_ca TO SQL SERVER ( + HOST 'sql-server', + PORT 1433, + DATABASE test_ssl, + USER '${arg.default-sql-server-user}', + PASSWORD SECRET sql_server_pass, + SSL MODE verify_ca + ); +contains:invalid CONNECTION: SSL MODE 'verify_ca' requires SSL CERTIFICATE AUTHORITY + +# verify_ca fails with incorrect CA +! CREATE CONNECTION invalid_ca TO SQL SERVER ( + HOST 'sql-server', + PORT 1433, + DATABASE test_ssl, + USER '${arg.default-sql-server-user}', + PASSWORD SECRET sql_server_pass, + SSL MODE verify_ca, + SSL CERTIFICATE AUTHORITY SECRET alt_ssl_ca + ); +contains:certificate verify failed + +> SELECT count(*) from mz_connections WHERE name = 'invalid_ca'; +0 + + +# verify_ca works with correct CA +> CREATE CONNECTION verify_ca_ssl_connection TO SQL SERVER ( + HOST 'sql-server', + PORT 1433, + DATABASE test_ssl, + USER '${arg.default-sql-server-user}', + PASSWORD SECRET sql_server_pass, + SSL MODE verify_ca, + SSL CERTIFICATE AUTHORITY SECRET ssl_ca + ); + +> VALIDATE CONNECTION verify_ca_ssl_connection; + +> SELECT name, type from mz_connections WHERE name = 'verify_ca_ssl_connection'; +name type +--------------------------------------- +verify_ca_ssl_connection sql-server + + +> DROP CONNECTION verify_ca_ssl_connection; diff --git a/test/sql-server-cdc/mzcompose.py b/test/sql-server-cdc/mzcompose.py index 7f14a5f30b047..69342b9b733a5 100644 --- a/test/sql-server-cdc/mzcompose.py +++ 
b/test/sql-server-cdc/mzcompose.py @@ -19,13 +19,26 @@ from materialize.mzcompose.services.materialized import Materialized from materialize.mzcompose.services.mz import Mz from materialize.mzcompose.services.sql_server import SqlServer +from materialize.mzcompose.services.test_certs import TestCerts from materialize.mzcompose.services.testdrive import Testdrive +TLS_CONF_PATH = MZ_ROOT / "test" / "sql-server-cdc" / "tls-mssconfig.conf" + SERVICES = [ Mz(app_password=""), - Materialized(), + Materialized( + additional_system_parameter_defaults={ + "log_filter": "mz_storage::source::sql-server=debug,mz_sql_server_util=debug,info" + }, + ), Testdrive(), - SqlServer(), + TestCerts(), + SqlServer( + volumes_extra=[ + "secrets:/var/opt/mssql/certs", + f"{TLS_CONF_PATH}:/var/opt/mssql/mssql.conf", + ] + ), ] @@ -55,15 +68,26 @@ def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: c.kill("materialized") c.rm("materialized") - c.up("materialized", "sql-server") + # must start test-certs, otherwise the certificates needed by sql-server may not be available + # in the secrets volume when it starts up + c.up("materialized", "test-certs", "sql-server") seed = random.getrandbits(16) + ssl_ca = c.exec( + "sql-server", "cat", "/var/opt/mssql/certs/ca.crt", capture=True + ).stdout + alt_ssl_ca = c.exec( + "sql-server", "cat", "/var/opt/mssql/certs/ca-selective.crt", capture=True + ).stdout + c.test_parts( matching_files, lambda file: c.run_testdrive_files( "--no-reset", "--max-errors=1", f"--seed={seed}", + f"--var=ssl-ca={ssl_ca}", + f"--var=alt-ssl-ca={alt_ssl_ca}", f"--var=default-replica-size={Materialized.Size.DEFAULT_SIZE}-{Materialized.Size.DEFAULT_SIZE}", f"--var=default-sql-server-user={SqlServer.DEFAULT_USER}", f"--var=default-sql-server-password={SqlServer.DEFAULT_SA_PASSWORD}", diff --git a/test/sql-server-cdc/tls-mssconfig.conf b/test/sql-server-cdc/tls-mssconfig.conf new file mode 100644 index 0000000000000..d700b70f7432a --- /dev/null +++ 
b/test/sql-server-cdc/tls-mssconfig.conf @@ -0,0 +1,14 @@ +# Copyright Materialize, Inc. and contributors. All rights reserved. +# +# Use of this software is governed by the Business Source License +# included in the LICENSE file at the root of this repository. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0. + +[network] +tlscert = /var/opt/mssql/certs/sql-server.crt +tlskey = /var/opt/mssql/certs/sql-server.key +tlsprotocols = 1.2 +forceencryption = 0 diff --git a/test/sqllogictest/attributes/mir_arity.slt b/test/sqllogictest/attributes/mir_arity.slt index b336af76ad0b0..fdb486a378cb3 100644 --- a/test/sqllogictest/attributes/mir_arity.slt +++ b/test/sqllogictest/attributes/mir_arity.slt @@ -96,14 +96,9 @@ EXPLAIN OPTIMIZED PLAN WITH(arity, humanized expressions) AS VERBOSE TEXT FOR SELECT sum(e * f), max(f) FROM v GROUP BY mod(e, 5) ---- Explained Query: - Project (#3{sum}, #1{max_f}) // { arity: 2 } - Join on=(#0 = #2) type=differential // { arity: 4 } - ArrangeBy keys=[[#0]] // { arity: 2 } - Reduce group_by=[(#0{e} % 5)] aggregates=[max(#1{f})] // { arity: 2 } - ReadStorage materialize.public.v // { arity: 2 } - ArrangeBy keys=[[#0]] // { arity: 2 } - Reduce group_by=[(#0{e} % 5)] aggregates=[sum((#0{e} * #1{f}))] // { arity: 2 } - ReadStorage materialize.public.v // { arity: 2 } + Project (#1{sum}, #2{max_f}) // { arity: 2 } + Reduce group_by=[(#0{e} % 5)] aggregates=[sum((#0{e} * #1{f})), max(#1{f})] // { arity: 3 } + ReadStorage materialize.public.v // { arity: 2 } Source materialize.public.v diff --git a/test/sqllogictest/attributes/mir_column_types.slt b/test/sqllogictest/attributes/mir_column_types.slt index e54de76bb63a7..9d6b6f0492d71 100644 --- a/test/sqllogictest/attributes/mir_column_types.slt +++ b/test/sqllogictest/attributes/mir_column_types.slt @@ -73,16 +73,9 @@ Explained Query: Threshold // { types: "(bigint?, 
text?, date?)" } Union // { types: "(bigint?, text?, date?)" } Negate // { types: "(bigint?, text?, date?)" } - Project (#3{sum_a}, #1{max_b}, #0{c}) // { types: "(bigint?, text?, date?)" } - Join on=(#0{c} = #2{c}) type=differential // { types: "(date?, text?, date?, bigint?)" } - ArrangeBy keys=[[#0{c}]] // { types: "(date?, text?)" } - Reduce group_by=[#1{c}] aggregates=[max(#0{b})] // { types: "(date?, text?)" } - Project (#1{b}, #2{c}) // { types: "(text?, date?)" } - ReadStorage materialize.public.t // { types: "(integer?, text?, date?)" } - ArrangeBy keys=[[#0{c}]] // { types: "(date?, bigint?)" } - Reduce group_by=[#1{c}] aggregates=[sum(#0{a})] // { types: "(date?, bigint?)" } - Project (#0{a}, #2{c}) // { types: "(integer?, date?)" } - ReadStorage materialize.public.t // { types: "(integer?, text?, date?)" } + Project (#1{sum_a}, #2{max_b}, #0{c}) // { types: "(bigint?, text?, date?)" } + Reduce group_by=[#2{c}] aggregates=[sum(#0{a}), max(#1{b})] // { types: "(date?, bigint?, text?)" } + ReadStorage materialize.public.t // { types: "(integer?, text?, date?)" } Constant // { types: "(bigint, text, date?)" } - (1, "hello", null) @@ -140,41 +133,31 @@ EXPLAIN OPTIMIZED PLAN WITH(types, humanized expressions) AS VERBOSE TEXT FOR (SELECT null::boolean as f1, 10 as f2) EXCEPT (SELECT min(f), count(*) FROM v WHERE (select d::double FROM u) = v.e GROUP BY e LIMIT 1) ---- Explained Query: - With - cte l0 = - Project (#0{e}, #1{f}) // { types: "(double precision, boolean?)" } - Join on=(#0{e} = #2) type=differential // { types: "(double precision, boolean?, double precision)" } - ArrangeBy keys=[[#0{e}]] // { types: "(double precision, boolean?)" } - Filter (#0{e}) IS NOT NULL // { types: "(double precision, boolean?)" } - ReadStorage materialize.public.v // { types: "(double precision?, boolean?)" } - ArrangeBy keys=[[#0]] // { types: "(double precision?)" } - Union // { types: "(double precision?)" } - Project (#1) // { types: "(double precision?)" } - Filter 
(#0{d}) IS NOT NULL // { types: "(integer, double precision?)" } - Map (integer_to_double(#0{d})) // { types: "(integer?, double precision?)" } - ReadStorage materialize.public.u // { types: "(integer?)" } - Map (error("more than one record produced in subquery")) // { types: "(double precision)" } - Project () // { types: "()" } - Filter (#0{count} > 1) // { types: "(bigint)" } - Reduce aggregates=[count(*)] // { types: "(bigint)" } - Project () // { types: "()" } - ReadStorage materialize.public.u // { types: "(integer?)" } - Return // { types: "(boolean?, bigint)" } - Threshold // { types: "(boolean?, bigint)" } - Union // { types: "(boolean?, bigint)" } - Negate // { types: "(boolean?, bigint)" } - TopK limit=1 // { types: "(boolean?, bigint)" } - Project (#1{min_f}, #3{count}) // { types: "(boolean?, bigint)" } - Join on=(#0{e} = #2{e}) type=differential // { types: "(double precision, boolean?, double precision, bigint)" } - ArrangeBy keys=[[#0{e}]] // { types: "(double precision, boolean?)" } - Reduce group_by=[#0{e}] aggregates=[min(#1{f})] // { types: "(double precision, boolean?)" } - Get l0 // { types: "(double precision, boolean?)" } - ArrangeBy keys=[[#0{e}]] // { types: "(double precision, bigint)" } - Reduce group_by=[#0{e}] aggregates=[count(*)] // { types: "(double precision, bigint)" } - Project (#0{e}) // { types: "(double precision)" } - Get l0 // { types: "(double precision, boolean?)" } - Constant // { types: "(boolean?, bigint)" } - - (null, 10) + Threshold // { types: "(boolean?, bigint)" } + Union // { types: "(boolean?, bigint)" } + Negate // { types: "(boolean?, bigint)" } + TopK limit=1 // { types: "(boolean?, bigint)" } + Project (#1{min_f}, #2{count}) // { types: "(boolean?, bigint)" } + Reduce group_by=[#0{e}] aggregates=[min(#1{f}), count(*)] // { types: "(double precision, boolean?, bigint)" } + Project (#0{e}, #1{f}) // { types: "(double precision, boolean?)" } + Join on=(#0{e} = #2) type=differential // { types: "(double 
precision, boolean?, double precision)" } + ArrangeBy keys=[[#0{e}]] // { types: "(double precision, boolean?)" } + Filter (#0{e}) IS NOT NULL // { types: "(double precision, boolean?)" } + ReadStorage materialize.public.v // { types: "(double precision?, boolean?)" } + ArrangeBy keys=[[#0]] // { types: "(double precision?)" } + Union // { types: "(double precision?)" } + Project (#1) // { types: "(double precision?)" } + Filter (#0{d}) IS NOT NULL // { types: "(integer, double precision?)" } + Map (integer_to_double(#0{d})) // { types: "(integer?, double precision?)" } + ReadStorage materialize.public.u // { types: "(integer?)" } + Map (error("more than one record produced in subquery")) // { types: "(double precision)" } + Project () // { types: "()" } + Filter (#0{count} > 1) // { types: "(bigint)" } + Reduce aggregates=[count(*)] // { types: "(bigint)" } + Project () // { types: "()" } + ReadStorage materialize.public.u // { types: "(integer?)" } + Constant // { types: "(boolean?, bigint)" } + - (null, 10) Source materialize.public.u Source materialize.public.v diff --git a/test/sqllogictest/autogenerated/all_parts_essential.slt b/test/sqllogictest/autogenerated/all_parts_essential.slt index 76a788d722c7a..737aba165f1f0 100644 --- a/test/sqllogictest/autogenerated/all_parts_essential.slt +++ b/test/sqllogictest/autogenerated/all_parts_essential.slt @@ -306,8 +306,8 @@ GROUP BY 1, 2 ---- Explained Query: - With - cte l0 = + Project (#0{l_partkey}..=#4{min_l_orderkey}, #3{min_c_nationkey}) // { arity: 6 } + Reduce group_by=[#1{l_partkey}, #2{c_nationkey}] aggregates=[count(*), min(#2{c_nationkey}), min(#0{l_orderkey})] // { arity: 5 } Project (#0{l_orderkey}, #1{l_partkey}, #28{c_nationkey}) // { arity: 3 } Filter (#10{l_shipdate} <= 1995-11-14) AND (#30{c_acctbal} <= 12907776) AND (#10{l_shipdate} >= 1995-04-19) AND (#30{c_acctbal} >= #5{l_extendedprice}) // { arity: 33 } Join on=(#0{l_orderkey} = #16{o_orderkey} AND #17{o_custkey} = #25{c_custkey}) type=delta 
// { arity: 33 } @@ -321,18 +321,6 @@ Explained Query: ReadIndex on=orders pk_orders_orderkey=[delta join lookup] fk_orders_custkey=[delta join lookup] // { arity: 9 } ArrangeBy keys=[[#0{c_custkey}]] // { arity: 8 } ReadIndex on=customer pk_customer_custkey=[delta join lookup] // { arity: 8 } - Return // { arity: 6 } - Project (#0{l_partkey}, #1{c_nationkey}, #6{count}, #2{min_c_nationkey}, #3{min_l_orderkey}, #2{min_c_nationkey}) // { arity: 6 } - Join on=(#0{l_partkey} = #4{l_partkey} AND #1{c_nationkey} = #5{c_nationkey}) type=differential // { arity: 7 } - implementation - %0[#0, #1]UKKA » %1[#0, #1]UKKA - ArrangeBy keys=[[#0{l_partkey}, #1{c_nationkey}]] // { arity: 4 } - Reduce group_by=[#1{l_partkey}, #2{c_nationkey}] aggregates=[min(#2{c_nationkey}), min(#0{l_orderkey})] // { arity: 4 } - Get l0 // { arity: 3 } - ArrangeBy keys=[[#0{l_partkey}, #1{c_nationkey}]] // { arity: 3 } - Reduce group_by=[#0{l_partkey}, #1{c_nationkey}] aggregates=[count(*)] // { arity: 3 } - Project (#1{l_partkey}, #2{c_nationkey}) // { arity: 2 } - Get l0 // { arity: 3 } Used Indexes: - materialize.public.pk_customer_custkey (delta join lookup) @@ -622,8 +610,8 @@ GROUP BY 1, 2 ---- Explained Query: - With - cte l0 = + Project (#0{o_custkey}, #0{o_custkey}..=#2{count}) // { arity: 4 } + Reduce group_by=[#1{o_custkey}] aggregates=[min(#0{o_orderkey}), count(*)] // { arity: 3 } Project (#2{o_orderkey}, #3{o_custkey}) // { arity: 2 } Filter (#1{l_shipdate} < #5{o_orderdate}) AND (#11{c_acctbal} > #4{o_totalprice}) AND (#11{c_acctbal} >= #0{l_extendedprice}) // { arity: 14 } Join on=(#3{o_custkey} = #6{c_custkey}) type=delta // { arity: 14 } @@ -640,18 +628,6 @@ Explained Query: ReadIndex on=orders pk_orders_orderkey=[*** full scan ***] // { arity: 9 } ArrangeBy keys=[[#0{c_custkey}]] // { arity: 8 } ReadIndex on=customer pk_customer_custkey=[delta join lookup] // { arity: 8 } - Return // { arity: 4 } - Project (#0{o_custkey}, #0{o_custkey}, #1{min_o_orderkey}, #3{count}) // { arity: 
4 } - Join on=(#0{o_custkey} = #2{o_custkey}) type=differential // { arity: 4 } - implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{o_custkey}]] // { arity: 2 } - Reduce group_by=[#1{o_custkey}] aggregates=[min(#0{o_orderkey})] // { arity: 2 } - Get l0 // { arity: 2 } - ArrangeBy keys=[[#0{o_custkey}]] // { arity: 2 } - Reduce group_by=[#0{o_custkey}] aggregates=[count(*)] // { arity: 2 } - Project (#1{o_custkey}) // { arity: 1 } - Get l0 // { arity: 2 } Used Indexes: - materialize.public.pk_customer_custkey (delta join lookup) diff --git a/test/sqllogictest/cockroach/rows_from.slt b/test/sqllogictest/cockroach/rows_from.slt index d48087fdae5a9..8683df9854d7f 100644 --- a/test/sqllogictest/cockroach/rows_from.slt +++ b/test/sqllogictest/cockroach/rows_from.slt @@ -17,29 +17,26 @@ # 2.0 license, a copy of which can be found in the LICENSE file at the # root of this repository. -# not supported yet -halt - mode cockroach query II colnames SELECT * FROM ROWS FROM (generate_series(1,2), generate_series(4,8)) ---- -generate_series generate_series -1 4 -2 5 -NULL 6 -NULL 7 -NULL 8 +generate_series generate_series +NULL 6 +NULL 7 +NULL 8 +1 4 +2 5 query II colnames SELECT * FROM ROWS FROM (generate_series(1,4), generate_series(4,5)) ---- -generate_series generate_series -1 4 -2 5 -3 NULL -4 NULL +generate_series generate_series +3 NULL +4 NULL +1 4 +2 5 query II colnames SELECT * FROM ROWS FROM (generate_series(1,0), generate_series(1,0)) @@ -52,45 +49,46 @@ SELECT * FROM ROWS FROM (generate_series(1,0), generate_series(1,1)) generate_series generate_series NULL 1 -query II colnames -SELECT * FROM ROWS FROM (generate_series(1,2), greatest(1,2,3,4)) ----- -generate_series greatest -1 4 -2 NULL +## TODO: In Materialize, `TabletizedScalar` somehow doesn't work with `greatest`. This works in both Postgres and +## Cockroach. 
+#query II colnames +#SELECT * FROM ROWS FROM (generate_series(1,2), greatest(1,2,3,4)) +#---- +#generate_series greatest +#1 4 +#2 NULL query IT colnames -SELECT * FROM ROWS FROM (generate_series(1,2), current_user) +SELECT * FROM ROWS FROM (generate_series(1,2), current_user()) ---- -generate_series current_user -1 root -2 NULL +generate_series current_user +2 NULL +1 materialize query TI colnames -SELECT * FROM ROWS FROM (current_user, generate_series(1,2)) +SELECT * FROM ROWS FROM (current_user(), generate_series(1,2)) ---- current_user generate_series -root 1 -NULL 2 +NULL 2 +materialize 1 query TT colnames -SELECT * FROM ROWS FROM (current_user, current_user) +SELECT * FROM ROWS FROM (current_user(), current_user()) ---- current_user current_user -root root +materialize materialize query III colnames SELECT * FROM ROWS FROM (information_schema._pg_expandarray(array[4,5,6]), generate_series(10,15)); ---- -x n generate_series -4 1 10 -5 2 11 -6 3 12 +x n generate_series NULL NULL 13 NULL NULL 14 NULL NULL 15 +4 1 10 +5 2 11 +6 3 12 -# Regression test for materialize#27389. 
- -statement error pg_get_keywords\(\): set-returning functions must appear at the top level of FROM -SELECT * FROM ROWS FROM(generate_series(length((pg_get_keywords()).word),10)); +## No `pg_get_keywords` in Materialize +#statement error pg_get_keywords\(\): set-returning functions must appear at the top level of FROM +#SELECT * FROM ROWS FROM(generate_series(length((pg_get_keywords()).word),10)); diff --git a/test/sqllogictest/current_user.slt b/test/sqllogictest/current_user.slt index ac4b1dad94d9a..1b55fa8e9fbac 100644 --- a/test/sqllogictest/current_user.slt +++ b/test/sqllogictest/current_user.slt @@ -14,11 +14,6 @@ SELECT current_user() ---- materialize -query T -SELECT current_user = current_user() ----- -true - query T SELECT current_role() = current_user() ---- diff --git a/test/sqllogictest/distinct_arrangements.slt b/test/sqllogictest/distinct_arrangements.slt index 1c3c0798d987e..0e1d705644b0e 100644 --- a/test/sqllogictest/distinct_arrangements.slt +++ b/test/sqllogictest/distinct_arrangements.slt @@ -375,6 +375,7 @@ query T SELECT mdo.name FROM mz_introspection.mz_arrangement_sharing mash JOIN mz_introspection.mz_dataflow_operators mdo ON mash.operator_id = mdo.id ORDER BY mdo.name; ---- AccumulableErrorCheck +Arrange ReduceCollation Arrange ReduceMinsMaxes Arrange ReduceMinsMaxes Arrange ReduceMinsMaxes @@ -410,6 +411,8 @@ Distinct recursive err DistinctBy DistinctByErrorCheck ReduceAccumulable +ReduceCollation +ReduceCollation Errors ReduceMinsMaxes ReduceMinsMaxes ReduceMinsMaxes @@ -861,8 +864,10 @@ SELECT mdo.name FROM mz_introspection.mz_arrangement_sharing mash JOIN mz_intros AccumulableErrorCheck AccumulableErrorCheck AccumulableErrorCheck +Arrange ReduceCollation Arrange ReduceMinsMaxes Arrange ReduceMinsMaxes +Arrange bundle err ArrangeAccumulable [val: empty] ArrangeAccumulable [val: empty] ArrangeAccumulable [val: empty] @@ -871,8 +876,6 @@ ArrangeBy[[CallUnary(CastInt32ToInt64(CastInt32ToInt64), Column(0))]] ArrangeBy[[Column(0)]] 
ArrangeBy[[Column(0)]] ArrangeBy[[Column(0)]] -ArrangeBy[[Column(0)]] -ArrangeBy[[Column(0)]]-errors ArrangeBy[[Column(0)]]-errors ArrangeBy[[Column(0)]]-errors Arranged Accumulable Distinct [val: empty] @@ -890,6 +893,7 @@ Arranged MinsMaxesHierarchical input Arranged MinsMaxesHierarchical input Arranged MinsMaxesHierarchical input Arranged MinsMaxesHierarchical input +Arranged ReduceFuseBasic input Arranged ReduceInaccumulable Arranged ReduceInaccumulable Arranged TopK input @@ -903,10 +907,12 @@ Arranged TopK input ReduceAccumulable ReduceAccumulable ReduceAccumulable +ReduceCollation +ReduceCollation Errors +ReduceFuseBasic ReduceInaccumulable ReduceInaccumulable ReduceInaccumulable Error Check -ReduceInaccumulable Error Check ReduceMinsMaxes ReduceMinsMaxes Reduced Accumulable Distinct [val: empty] @@ -978,12 +984,13 @@ WHERE mdod.dataflow_name NOT LIKE '%introspection-subscribe%' GROUP BY mdod.name ORDER BY mdod.name; ---- -AccumulableErrorCheck 10 +AccumulableErrorCheck 11 +Arrange␠ReduceCollation 1 Arrange␠ReduceMinsMaxes 3 Arrange␠export␠iterative 2 Arrange␠export␠iterative␠err 2 Arrange␠recursive␠err 3 -ArrangeAccumulable␠[val:␠empty] 10 +ArrangeAccumulable␠[val:␠empty] 11 ArrangeBy[[CallBinary(JsonbGetStringStringify,␠Column(1),␠Literal(Ok(Row{[String("id")]}),␠ColumnType␠{␠scalar_type:␠String,␠nullable:␠false␠}))]] 2 ArrangeBy[[CallBinary(JsonbGetStringStringify,␠Column(2),␠Literal(Ok(Row{[String("id")]}),␠ColumnType␠{␠scalar_type:␠String,␠nullable:␠false␠}))]] 1 ArrangeBy[[CallVariadic(Coalesce,␠[Column(2),␠Column(3)])]] 2 @@ -992,6 +999,7 @@ ArrangeBy[[Column(0),␠CallUnary(CastUint64ToNumeric(CastUint64ToNumeric(None)) ArrangeBy[[Column(0),␠CallUnary(Lower(Lower),␠Column(1))]] 1 ArrangeBy[[Column(0),␠CallUnary(Lower(Lower),␠Column(2))]] 1 
ArrangeBy[[Column(0),␠Column(1),␠Column(2),␠Column(3),␠Column(4),␠Column(5),␠Column(6),␠Column(7),␠Column(8),␠Column(9),␠Column(10),␠Column(11),␠Column(12),␠Column(13),␠Column(14),␠Column(15),␠Column(16),␠Column(17),␠Column(18),␠Column(19),␠Column(20),␠Column(21),␠Column(22),␠Column(23),␠Column(24),␠Column(25),␠Column(26),␠Column(27)]] 2 +ArrangeBy[[Column(0),␠Column(1),␠Column(2),␠Column(3),␠Column(4),␠Column(5),␠Column(6),␠Column(7),␠Column(8),␠Column(9),␠Column(10),␠Column(11),␠Column(12),␠Column(13),␠Column(14),␠Column(15),␠Column(16),␠Column(17),␠Column(18),␠Column(19)]] 2 ArrangeBy[[Column(0),␠Column(1),␠Column(2),␠Column(3),␠Column(4),␠Column(5),␠Column(6),␠Column(7),␠Column(8),␠Column(9),␠Column(10),␠Column(11),␠Column(12),␠Column(13)]] 1 ArrangeBy[[Column(0),␠Column(1),␠Column(2),␠Column(3),␠Column(4),␠Column(5),␠Column(6),␠Column(7),␠Column(8),␠Column(9),␠Column(10),␠Column(11),␠Column(12),␠Column(13)]]-errors 1 ArrangeBy[[Column(0),␠Column(1),␠Column(2),␠Column(3),␠Column(4),␠Column(5)]] 1 @@ -1002,7 +1010,7 @@ ArrangeBy[[Column(0),␠Column(1)]] 2 ArrangeBy[[Column(0),␠Column(2)]] 4 ArrangeBy[[Column(0),␠Column(3)]] 4 ArrangeBy[[Column(0),␠Column(4)]] 1 -ArrangeBy[[Column(0)]] 155 +ArrangeBy[[Column(0)]] 157 ArrangeBy[[Column(0)]]-errors 44 ArrangeBy[[Column(1),␠Column(0)]] 1 ArrangeBy[[Column(1),␠Column(2)]] 2 @@ -1018,21 +1026,24 @@ ArrangeBy[[Column(21)]] 1 ArrangeBy[[Column(21)]]-errors 1 ArrangeBy[[Column(3)]] 6 ArrangeBy[[Column(3)]]-errors 2 -ArrangeBy[[Column(4)]] 5 +ArrangeBy[[Column(4)]] 4 ArrangeBy[[Column(4)]]-errors 2 -ArrangeBy[[Column(5)]] 2 -ArrangeBy[[Column(6)]] 3 +ArrangeBy[[Column(5)]] 3 +ArrangeBy[[Column(6)]] 2 ArrangeBy[[Column(6)]]-errors 2 +ArrangeBy[[Column(7)]] 1 ArrangeBy[[Column(9)]] 1 ArrangeBy[[]] 11 -Arranged␠DistinctBy 47 +Arranged␠DistinctBy 49 Arranged␠MinsMaxesHierarchical␠input 14 Arranged␠ReduceInaccumulable 3 Arranged␠TopK␠input 92 Distinct␠recursive␠err 3 -DistinctBy 47 -DistinctByErrorCheck 47 -ReduceAccumulable 10 
+DistinctBy 49 +DistinctByErrorCheck 49 +ReduceAccumulable 11 +ReduceCollation 1 +ReduceCollation␠Errors 1 ReduceInaccumulable 3 ReduceInaccumulable␠Error␠Check 3 ReduceMinsMaxes 3 @@ -1141,9 +1152,12 @@ GROUP BY mdod.name ORDER BY mdod.name ---- AccumulableErrorCheck 2 +Arrange␠ReduceCollation 1 Arrange␠ReduceMinsMaxes 1 ArrangeAccumulable␠[val:␠empty] 2 ReduceAccumulable 2 +ReduceCollation 1 +ReduceCollation␠Errors 1 ReduceMinsMaxes 1 ReduceMinsMaxes␠Error␠Check 1 diff --git a/test/sqllogictest/explain/aggregates.slt b/test/sqllogictest/explain/aggregates.slt index abfd3343587de..4dcc2fc3cdebe 100644 --- a/test/sqllogictest/explain/aggregates.slt +++ b/test/sqllogictest/explain/aggregates.slt @@ -37,16 +37,8 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b), array_agg(c) FROM t GROUP BY a; ---- Explained Query: - Project (#0{a}, #1{array_agg}, #3{array_agg}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{c}]))] - Project (#0{a}, #2{c}) - ReadStorage materialize.public.t + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), array_agg[order_by=[]](row(array[#2{c}]))] + ReadStorage materialize.public.t Source materialize.public.t @@ -58,16 +50,8 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b), string_agg(c, ',') FROM t GROUP BY a; ---- Explained Query: - Project (#0{a}, #1{array_agg}, #3{string_agg}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - ArrangeBy keys=[[#0{a}]] - Reduce 
group_by=[#0{a}] aggregates=[string_agg[order_by=[]](row(row(#1{c}, ",")))] - Project (#0{a}, #2{c}) - ReadStorage materialize.public.t + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), string_agg[order_by=[]](row(row(#2{c}, ",")))] + ReadStorage materialize.public.t Source materialize.public.t @@ -79,15 +63,8 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b), string_agg(c, ',' ORDER BY b DESC) FROM t GROUP BY a; ---- Explained Query: - Project (#0{a}, #1{array_agg}, #3{string_agg}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[string_agg[order_by=[#0{a} desc nulls_first]](row(row(#2{c}, ","), #1{b}))] - ReadStorage materialize.public.t + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), string_agg[order_by=[#0{a} desc nulls_first]](row(row(#2{c}, ","), #1{b}))] + ReadStorage materialize.public.t Source materialize.public.t @@ -99,21 +76,9 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b), max(c) FROM t WHERE c <> 'x' GROUP BY a; ---- Explained Query: - With - cte l0 = - Filter (#2{c} != "x") - ReadStorage materialize.public.t - Return - Project (#0{a}, #3{array_agg}, #1{max_c}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[max(#1{c})] - Project (#0{a}, #2{c}) - Get l0 - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Project (#0{a}, #1{b}) - Get l0 + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), max(#2{c})] + Filter (#2{c} != "x") + ReadStorage materialize.public.t Source materialize.public.t 
filter=((#2{c} != "x")) @@ -126,24 +91,11 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b), max(b) FROM t GROUP BY a HAVING count(a) > 1; ---- Explained Query: - With - cte l0 = - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - Return - Project (#0{a}, #5{array_agg}, #1{max_b}) - Filter (#3{count} > 1) - Join on=(#0{a} = #2{a} = #4{a}) type=delta - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[max(#1{b})] - Get l0 - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[count(*)] - Project (#0{a}) - ReadStorage materialize.public.t - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Get l0 + Project (#0{a}..=#2{max_b}) + Filter (#3{count} > 1) + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), max(#1{b}), count(*)] + Project (#0{a}, #1{b}) + ReadStorage materialize.public.t Source materialize.public.t @@ -169,19 +121,9 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b ORDER BY b ASC), array_agg(b ORDER BY b DESC) FROM t GROUP BY a; ---- Explained Query: - With - cte l0 = - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - Return - Project (#0{a}, #1{array_agg}, #3{array_agg}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[#0{a} asc nulls_last]](row(array[#1{b}], #1{b}))] - Get l0 - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[#0{a} desc nulls_first]](row(array[#1{b}], #1{b}))] - Get l0 + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[#0{a} asc nulls_last]](row(array[#1{b}], #1{b})), array_agg[order_by=[#0{a} desc nulls_first]](row(array[#1{b}], #1{b}))] + Project (#0{a}, #1{b}) + ReadStorage materialize.public.t Source materialize.public.t @@ -195,31 +137,19 @@ EXPLAIN 
OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a Explained Query: With cte l0 = - Project (#1{b}) - ReadStorage materialize.public.t - cte l1 = - CrossJoin type=delta - ArrangeBy keys=[[]] - Reduce aggregates=[sum(1)] - Project () - ReadStorage materialize.public.t - ArrangeBy keys=[[]] - Reduce aggregates=[array_agg[order_by=[#0{b} asc nulls_last]](row(array[#0{b}], #0{b}))] - Get l0 - ArrangeBy keys=[[]] - Reduce aggregates=[array_agg[order_by=[#0{b} desc nulls_first]](row(array[#0{b}], #0{b}))] - Get l0 + Reduce aggregates=[array_agg[order_by=[#0{b} asc nulls_last]](row(array[#0{b}], #0{b})), array_agg[order_by=[#0{b} desc nulls_first]](row(array[#0{b}], #0{b})), sum(1)] + Project (#1{b}) + ReadStorage materialize.public.t Return Project (#0{array_agg}, #1{array_agg}, #3) Map ((#2{sum} > 0)) Union - Project (#1{array_agg}, #2{array_agg}, #0{sum}) - Get l1 + Get l0 Map (null, null, null) Union Negate Project () - Get l1 + Get l0 Constant - () @@ -262,29 +192,18 @@ EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT s Explained Query: With cte l0 = - Project (#1{b}) - ReadStorage materialize.public.t - cte l1 = - CrossJoin type=delta - ArrangeBy keys=[[]] - Reduce aggregates=[sum(#0{a})] - Project (#0{a}) - ReadStorage materialize.public.t - ArrangeBy keys=[[]] - Reduce aggregates=[jsonb_agg[order_by=[]](row(jsonbable_to_jsonb(#0{b})))] - Get l0 - ArrangeBy keys=[[]] - Reduce aggregates=[array_agg[order_by=[]](row(array[#0{b}]))] - Get l0 + Reduce aggregates=[sum(#0{a}), jsonb_agg[order_by=[]](row(jsonbable_to_jsonb(#1{b}))), array_agg[order_by=[]](row(array[#1{b}]))] + Project (#0{a}, #1{b}) + ReadStorage materialize.public.t Return Project (#0{sum_a}..=#2{array_agg}, #2{array_agg}) Union - Get l1 + Get l0 Map (null, null, null) Union Negate Project () - Get l1 + Get l0 Constant - () @@ -298,19 +217,11 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, 
array_agg(b ORDER BY b) FROM t GROUP BY a HAVING array_agg(b ORDER BY b) = array_agg(b ORDER BY b DESC); ---- Explained Query: - With - cte l0 = - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - Return - Project (#0{a}, #1{array_agg}) - Join on=(#0{a} = #2{a} AND #1{array_agg} = #3{array_agg}) type=differential - ArrangeBy keys=[[#0{a}, #1{array_agg}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[#0{a} asc nulls_last]](row(array[#1{b}], #1{b}))] - Get l0 - ArrangeBy keys=[[#0{a}, #1{array_agg}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[#0{a} desc nulls_first]](row(array[#1{b}], #1{b}))] - Get l0 + Project (#0{a}, #1{array_agg}) + Filter (#1{array_agg} = #2{array_agg}) + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[#0{a} asc nulls_last]](row(array[#1{b}], #1{b})), array_agg[order_by=[#0{a} desc nulls_first]](row(array[#1{b}], #1{b}))] + Project (#0{a}, #1{b}) + ReadStorage materialize.public.t Source materialize.public.t @@ -323,19 +234,9 @@ EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a ---- Explained Query: - With - cte l0 = - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - Return - Project (#0{a}, #3{array_agg}, #1{array_agg}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[digest(text_to_bytea(#1{b}), "sha256")]))] - Get l0 - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Get l0 + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), array_agg[order_by=[]](row(array[digest(text_to_bytea(#1{b}), "sha256")]))] + Project (#0{a}, #1{b}) + ReadStorage materialize.public.t Source materialize.public.t @@ -348,19 +249,9 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH (humanized expressions) AS VERBOSE TEXT FOR SELECT a, array_agg(b), array_agg(CASE WHEN a = 1 THEN 'ooo' ELSE b END) FROM t GROUP 
BY a; ---- Explained Query: - With - cte l0 = - Project (#0{a}, #1{b}) - ReadStorage materialize.public.t - Return - Project (#0{a}, #1{array_agg}, #3{array_agg}) - Join on=(#0{a} = #2{a}) type=differential - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}]))] - Get l0 - ArrangeBy keys=[[#0{a}]] - Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[case when (#0{a} = 1) then "ooo" else #1{b} end]))] - Get l0 + Reduce group_by=[#0{a}] aggregates=[array_agg[order_by=[]](row(array[#1{b}])), array_agg[order_by=[]](row(array[case when (#0{a} = 1) then "ooo" else #1{b} end]))] + Project (#0{a}, #1{b}) + ReadStorage materialize.public.t Source materialize.public.t diff --git a/test/sqllogictest/explain/physical_plan_aggregates.slt b/test/sqllogictest/explain/physical_plan_aggregates.slt index 9d0af2a9b0dc4..b2778054bc5c6 100644 --- a/test/sqllogictest/explain/physical_plan_aggregates.slt +++ b/test/sqllogictest/explain/physical_plan_aggregates.slt @@ -44,31 +44,18 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), array_agg(c) FROM t GROUP BY a; ---- Explained Query: - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::Collection materialize.public.t - project=(#0, #1) - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{c}]))) - val_plan - project=(#2) - map=(row(array[#1{c}])) - key_plan - project=(#0) - Get::Collection materialize.public.t - project=(#0, #2) - raw=true + Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[]](row(array[#1{b}]))) + aggrs[1]=(1, array_agg[order_by=[]](row(array[#2{c}]))) + val_plan + project=(#3, #4) + map=(row(array[#1{b}]), row(array[#2{c}])) + key_plan + project=(#0) + 
Get::PassArrangements materialize.public.t + raw=true + +Source materialize.public.t Target cluster: quickstart @@ -78,31 +65,18 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), string_agg(c, ',') FROM t GROUP BY a; ---- Explained Query: - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::Collection materialize.public.t - project=(#0, #1) - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row(#1{c}, ",")))) - val_plan - project=(#2) - map=(row(row(#1{c}, ","))) - key_plan - project=(#0) - Get::Collection materialize.public.t - project=(#0, #2) - raw=true + Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[]](row(array[#1{b}]))) + aggrs[1]=(1, string_agg[order_by=[]](row(row(#2{c}, ",")))) + val_plan + project=(#3, #4) + map=(row(array[#1{b}]), row(row(#2{c}, ","))) + key_plan + project=(#0) + Get::PassArrangements materialize.public.t + raw=true + +Source materialize.public.t Target cluster: quickstart @@ -112,30 +86,16 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), string_agg(c, ',' ORDER BY b DESC) FROM t GROUP BY a; ---- Explained Query: - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::Collection materialize.public.t - project=(#0, #1) - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[#0 desc nulls_first]](row(row(#2{c}, ","), #1{b}))) - val_plan - project=(#3) - map=(row(row(#2{c}, ","), #1{b})) - key_plan - project=(#0) - Get::PassArrangements materialize.public.t - raw=true + 
Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[]](row(array[#1{b}]))) + aggrs[1]=(1, string_agg[order_by=[#0 desc nulls_first]](row(row(#2{c}, ","), #1{b}))) + val_plan + project=(#3, #4) + map=(row(array[#1{b}]), row(row(#2{c}, ","), #1{b})) + key_plan + project=(#0) + Get::PassArrangements materialize.public.t + raw=true Source materialize.public.t @@ -147,40 +107,22 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), max(c) FROM t WHERE c <> 'x' GROUP BY a; ---- Explained Query: - With - cte l0 = - Get::Collection materialize.public.t - raw=true - Return - Join::Linear - linear_stage[0] - closure - project=(#0, #2, #1) - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Hierarchical - aggr_funcs=[max] - skips=[0] - monotonic - must_consolidate - val_plan - project=(#1) - key_plan - project=(#0) - Get::Collection l0 - project=(#0, #2) - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::Collection l0 - project=(#0, #1) - raw=true + Reduce::Collation + aggregate_types=[b, h] + hierarchical + aggr_funcs=[max] + skips=[1] + monotonic + must_consolidate + basic + aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) + val_plan + project=(#3, #2) + map=(row(array[#1{b}])) + key_plan + project=(#0) + Get::Collection materialize.public.t + raw=true Source materialize.public.t filter=((#2{c} != "x")) @@ -193,80 +135,27 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), max(b) FROM t GROUP BY a HAVING count(a) > 1; ---- Explained Query: - With - cte l0 = - Get::Collection materialize.public.t - raw=true - Return - Join::Delta - plan_path[0] - delta_stage[1] - closure - project=(#0, #2, #1) - lookup={ relation=2, key=[#0] } - stream={ key=[#0], thinning=(#1) } - delta_stage[0] - closure - project=(#0, #1) - filter=((#2{"?column?"} 
> 1)) - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - plan_path[1] - delta_stage[1] - closure - project=(#1, #3, #2) - lookup={ relation=2, key=[#0] } - stream={ key=[#2], thinning=(#0, #1) } - delta_stage[0] - closure - project=(#0, #1, #0) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=() } - initial_closure - project=(#0) - filter=((#1{"?column?"} > 1)) - source={ relation=1, key=[#0] } - plan_path[2] - delta_stage[1] - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - delta_stage[0] - closure - project=(#0, #1) - filter=((#2{"?column?"} > 1)) - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=2, key=[#0] } - Reduce::Hierarchical - aggr_funcs=[max] - skips=[0] - monotonic - must_consolidate - val_plan - project=(#1) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true - Reduce::Accumulable - simple_aggrs[0]=(0, 0, count(*)) - val_plan - project=(#1) - map=(true) - key_plan=id - Get::Collection materialize.public.t - project=(#0) - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true + Reduce::Collation + aggregate_types=[b, h, a] + accumulable + simple_aggrs[0]=(0, 2, count(*)) + hierarchical + aggr_funcs=[max] + skips=[1] + monotonic + must_consolidate + basic + aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) + val_plan + project=(#2, #1, #3) + map=(row(array[#1{b}]), true) + key_plan + project=(#0) + mfp_after + project=(#0..=#2) + filter=((#3{"?column?"} > 1)) + Get::Collection materialize.public.t + raw=true Source materialize.public.t project=(#0, #1) @@ -302,34 +191,16 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b ORDER BY b ASC), array_agg(b ORDER BY b DESC) FROM t GROUP BY a; ---- Explained Query: - With - cte l0 = - 
Get::Collection materialize.public.t - raw=true - Return - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, array_agg[order_by=[#0 asc nulls_last]](row(array[#1{b}], #1{b}))) - val_plan - project=(#2) - map=(row(array[#1{b}], #1{b})) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[#0 desc nulls_first]](row(array[#1{b}], #1{b}))) - val_plan - project=(#2) - map=(row(array[#1{b}], #1{b})) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true + Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[#0 asc nulls_last]](row(array[#1{b}], #1{b}))) + aggrs[1]=(1, array_agg[order_by=[#0 desc nulls_first]](row(array[#1{b}], #1{b}))) + val_plan + project=(#2, #2) + map=(row(array[#1{b}], #1{b})) + key_plan + project=(#0) + Get::Collection materialize.public.t + raw=true Source materialize.public.t project=(#0, #1) @@ -344,83 +215,41 @@ EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT array_agg(b ORDER BY b ASC), ar Explained Query: With cte l0 = - Get::Collection materialize.public.t - raw=true - cte l1 = - Join::Delta - plan_path[0] - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0, #1) } - delta_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=0, key=[] } - plan_path[1] - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0, #1) } - delta_stage[0] - closure - project=(#1, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=1, key=[] } - plan_path[2] - delta_stage[1] - closure - project=(#0, #2, #1) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0, #1) } - delta_stage[0] - closure - project=(#1, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=2, key=[] } - Reduce::Accumulable - simple_aggrs[0]=(0, 
0, sum(1)) - val_plan - project=(#0) - map=(1) - key_plan=id - Get::Collection materialize.public.t - project=() - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[#0 asc nulls_last]](row(array[#0{b}], #0{b}))) - val_plan - project=(#1) - map=(row(array[#0{b}], #0{b})) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[#0 desc nulls_first]](row(array[#0{b}], #0{b}))) - val_plan - project=(#1) - map=(row(array[#0{b}], #0{b})) - key_plan - project=() - Get::PassArrangements l0 - raw=true + Reduce::Collation + aggregate_types=[b, b, a] + accumulable + simple_aggrs[0]=(0, 2, sum(1)) + basic + aggrs[0]=(0, array_agg[order_by=[#0 asc nulls_last]](row(array[#0{b}], #0{b}))) + aggrs[1]=(1, array_agg[order_by=[#0 desc nulls_first]](row(array[#0{b}], #0{b}))) + val_plan + project=(#1, #1, #2) + map=(row(array[#0{b}], #0{b}), 1) + key_plan + project=() + Get::Collection materialize.public.t + raw=true Return Mfp project=(#0, #1, #3) map=((#2{"?column?"} > 0)) Union - Get::Collection l1 - project=(#1, #2, #0) + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#2) } Mfp project=(#0..=#2) map=(null, null, null) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#2) } Constant - () @@ -485,81 +314,40 @@ EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT sum(a), jsonb_agg(b), array_agg Explained Query: With cte l0 = - Get::Collection materialize.public.t - project=(#1) - raw=true - cte l1 = - Join::Delta - plan_path[0] - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0, #1) } - delta_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=0, key=[] } - plan_path[1] - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], 
thinning=(#0, #1) } - delta_stage[0] - closure - project=(#1, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=1, key=[] } - plan_path[2] - delta_stage[1] - closure - project=(#0, #2, #1) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0, #1) } - delta_stage[0] - closure - project=(#1, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=2, key=[] } - Reduce::Accumulable + Reduce::Collation + aggregate_types=[a, b, b] + accumulable simple_aggrs[0]=(0, 0, sum(#0{a})) - val_plan=id - key_plan - project=() - Get::Collection materialize.public.t - project=(#0) - raw=true - Reduce::Basic - aggr=(0, jsonb_agg[order_by=[]](row(jsonbable_to_jsonb(#0{b})))) - val_plan - project=(#1) - map=(row(jsonbable_to_jsonb(#0{b}))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#0{b}]))) - val_plan - project=(#1) - map=(row(array[#0{b}])) - key_plan - project=() - Get::PassArrangements l0 - raw=true + basic + aggrs[0]=(1, jsonb_agg[order_by=[]](row(jsonbable_to_jsonb(#1{b})))) + aggrs[1]=(2, array_agg[order_by=[]](row(array[#1{b}]))) + val_plan + project=(#0, #2, #3) + map=(row(jsonbable_to_jsonb(#1{b})), row(array[#1{b}])) + key_plan + project=() + Get::Collection materialize.public.t + raw=true Return Mfp project=(#0..=#2, #2) Union - Get::PassArrangements l1 + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#2) } Mfp project=(#0..=#2) map=(null, null, null) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#2) } Constant - () @@ -574,46 +362,19 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b ORDER BY b) FROM t GROUP BY a HAVING array_agg(b ORDER BY b) = array_agg(b 
ORDER BY b DESC); ---- Explained Query: - With - cte l0 = - Get::Collection materialize.public.t - raw=true - Return - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0, #1{"?column?"}] } - stream={ key=[#0, #1{"?column?"}], thinning=() } - source={ relation=0, key=[#0, #1{"?column?"}] } - ArrangeBy - input_key=[#0] - raw=false - arrangements[0]={ key=[#0], permutation=id, thinning=(#1) } - arrangements[1]={ key=[#0, #1{"?column?"}], permutation=id, thinning=() } - types=[integer, text[]] - Reduce::Basic - aggr=(0, array_agg[order_by=[#0 asc nulls_last]](row(array[#1{b}], #1{b}))) - val_plan - project=(#2) - map=(row(array[#1{b}], #1{b})) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true - ArrangeBy - input_key=[#0] - raw=false - arrangements[0]={ key=[#0], permutation=id, thinning=(#1) } - arrangements[1]={ key=[#0, #1{"?column?"}], permutation=id, thinning=() } - types=[integer, text[]] - Reduce::Basic - aggr=(0, array_agg[order_by=[#0 desc nulls_first]](row(array[#1{b}], #1{b}))) - val_plan - project=(#2) - map=(row(array[#1{b}], #1{b})) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true + Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[#0 asc nulls_last]](row(array[#1{b}], #1{b}))) + aggrs[1]=(1, array_agg[order_by=[#0 desc nulls_first]](row(array[#1{b}], #1{b}))) + val_plan + project=(#2, #2) + map=(row(array[#1{b}], #1{b})) + key_plan + project=(#0) + mfp_after + project=(#0, #1) + filter=((#1{"?column?"} = #2{"?column?"})) + Get::Collection materialize.public.t + raw=true Source materialize.public.t project=(#0, #1) @@ -627,36 +388,16 @@ EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), array_agg(sha2 ---- Explained Query: - With - cte l0 = - Get::Collection materialize.public.t - raw=true - Return - Join::Linear - linear_stage[0] - closure - project=(#0, #2, #1) - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, 
array_agg[order_by=[]](row(array[digest(text_to_bytea(#1{b}), "sha256")]))) - val_plan - project=(#2) - map=(row(array[digest(text_to_bytea(#1{b}), "sha256")])) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true + Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[]](row(array[#1{b}]))) + aggrs[1]=(1, array_agg[order_by=[]](row(array[digest(text_to_bytea(#1{b}), "sha256")]))) + val_plan + project=(#2, #3) + map=(row(array[#1{b}]), row(array[digest(text_to_bytea(#1{b}), "sha256")])) + key_plan + project=(#0) + Get::Collection materialize.public.t + raw=true Source materialize.public.t project=(#0, #1) @@ -670,34 +411,16 @@ query T multiline EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT a, array_agg(b), array_agg(CASE WHEN a = 1 THEN 'ooo' ELSE b END) FROM t GROUP BY a; ---- Explained Query: - With - cte l0 = - Get::Collection materialize.public.t - raw=true - Return - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[#1{b}]))) - val_plan - project=(#2) - map=(row(array[#1{b}])) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, array_agg[order_by=[]](row(array[case when (#0{a} = 1) then "ooo" else #1{b} end]))) - val_plan - project=(#2) - map=(row(array[case when (#0{a} = 1) then "ooo" else #1{b} end])) - key_plan - project=(#0) - Get::PassArrangements l0 - raw=true + Reduce::Basic + aggrs[0]=(0, array_agg[order_by=[]](row(array[#1{b}]))) + aggrs[1]=(1, array_agg[order_by=[]](row(array[case when (#0{a} = 1) then "ooo" else #1{b} end]))) + val_plan + project=(#2, #3) + map=(row(array[#1{b}]), row(array[case when (#0{a} = 1) then "ooo" else #1{b} end])) + key_plan + project=(#0) + 
Get::Collection materialize.public.t + raw=true Source materialize.public.t project=(#0, #1) diff --git a/test/sqllogictest/explain/physical_plan_as_json.slt b/test/sqllogictest/explain/physical_plan_as_json.slt index 910722927dc65..5147c20ef91ee 100644 --- a/test/sqllogictest/explain/physical_plan_as_json.slt +++ b/test/sqllogictest/explain/physical_plan_as_json.slt @@ -3272,345 +3272,730 @@ GROUP BY a { "id": "Explained Query", "plan": { - "lir_id": 5, + "lir_id": 2, "node": { - "Join": { - "inputs": [ - { - "lir_id": 2, - "node": { - "Reduce": { - "input": { - "lir_id": 1, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } + "Reduce": { + "input": { + "lir_id": 1, + "node": { + "Get": { + "id": { + "Global": { + "User": 1 + } + }, + "keys": { + "raw": false, + "arranged": [ + [ + [ + { + "Column": [ + 0, + "a" ] - }, - "plan": "PassArrangements" + } + ], + [ + 0, + 1 + ], + [ + 1 + ] + ] + ], + "types": [ + { + "scalar_type": "Int32", + "nullable": true + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + "plan": "PassArrangements" + } + } + }, + "key_val_plan": { + "key_plan": { + "mfp": { + "expressions": [], + "predicates": [], + "projection": [ + 0 + ], + "input_arity": 2 + } + }, + "val_plan": { + "mfp": { + "expressions": [ + { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] } } }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, 
+ "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 2, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } + }, + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 2, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } + } + ], + "predicates": [], + "projection": [ + 3, + 4 + ], + "input_arity": 2 + } + } + }, + "plan": { + "Basic": { + "Multiple": [ + [ + 0, + { + "func": { + "StringAgg": { + "order_by": [] } }, - "val_plan": { - "mfp": { - "expressions": [ + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ { "CallVariadic": { "func": { "RecordCreate": { "field_names": [ - "" + "value", + "sep" ] } }, "exprs": [ { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] + } } }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ 
- { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 ] } + }, + { + "scalar_type": "String", + "nullable": false } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 ] } - ] - } + }, + { + "scalar_type": "String", + "nullable": false + } + ] } ] } } - ], - "predicates": [], - "projection": [ - 2 - ], - "input_arity": 2 + ] } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { + }, + "distinct": false + } + ], + [ + 1, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { "CallVariadic": { "func": { "RecordCreate": { "field_names": [ - "" + "value", + "sep" ] } }, "exprs": [ { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] + } } }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 ] } + }, + { + "scalar_type": "String", + "nullable": false } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] + ] + } } - } - ] - } - }, - "distinct": false - }, 
- "fused_unnest_list": false - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - } - }, - { - "lir_id": 4, - "node": { - "Reduce": { - "input": { - "lir_id": 3, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ + }, { - "Column": [ - 0, - "a" + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } ] } - ], - [ - 0, - 1 - ], - [ - 1 ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 + } + ] } }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" + "distinct": false + } + ] + ] + } + }, + "input_key": [ + { + "Column": [ + 0, + "a" + ] + } + ], + "mfp_after": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2 + ], + "input_arity": 3 + } + } + } + } + } + ], + "sources": [] +} +EOF + +# Test Reduce::Basic (global aggregate). 
+query T multiline +EXPLAIN PHYSICAL PLAN AS JSON FOR +SELECT + STRING_AGG(b::text || '1', ','), + STRING_AGG(b::text || '2', ',') +FROM t +---- +{ + "plans": [ + { + "id": "Explained Query", + "plan": { + "lir_id": 11, + "node": { + "Let": { + "id": 0, + "value": { + "lir_id": 2, + "node": { + "Reduce": { + "input": { + "lir_id": 1, + "node": { + "Get": { + "id": { + "Global": { + "User": 1 + } + }, + "keys": { + "raw": false, + "arranged": [ + [ + [ + { + "Column": [ + 0, + "a" + ] + } + ], + [ + 0, + 1 + ], + [ + 1 + ] + ] + ], + "types": [ + { + "scalar_type": "Int32", + "nullable": true + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + "plan": { + "Arrangement": [ + [ + { + "Column": [ + 0, + "a" + ] + } + ], + null, + { + "expressions": [], + "predicates": [], + "projection": [ + 1 + ], + "input_arity": 2 + } + ] + } + } + } + }, + "key_val_plan": { + "key_plan": { + "mfp": { + "expressions": [], + "predicates": [], + "projection": [], + "input_arity": 1 + } + }, + "val_plan": { + "mfp": { + "expressions": [ + { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 0, + "b" + ] + } + } + }, + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 1, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } + }, + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + 
"sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 1, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } + } + ], + "predicates": [], + "projection": [ + 2, + 3 + ], + "input_arity": 1 + } + } + }, + "plan": { + "Basic": { + "Multiple": [ + [ + 0, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" ] } }, @@ -3636,7 +4021,7 @@ GROUP BY a }, "expr": { "Column": [ - 1, + 0, "b" ] } @@ -3649,7 +4034,7 @@ GROUP BY a "data": [ 19, 1, - 50 + 49 ] } }, @@ -3683,21 +4068,13 @@ GROUP BY a } ] } - } - ], - "predicates": [], - "projection": [ - 2 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { + }, + "distinct": false + } + ], + [ + 1, + { "func": { "StringAgg": { "order_by": [] @@ -3734,7 +4111,7 @@ GROUP BY a }, "expr": { "Column": [ - 1, + 0, "b" ] } @@ -3783,1229 +4160,682 @@ GROUP BY a } }, "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" + } ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 + ] } - } - } - } - ], - "plan": { - "Linear": { - "source_relation": 0, - "source_key": [ - { - "Column": [ + }, + "input_key": null, + "mfp_after": { + "expressions": [], + "predicates": [], + "projection": [ 0, - null - ] - } - ], - "initial_closure": null, - "stage_plans": [ - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ 1 ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - 
"ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - } - } - ], - "final_closure": null - } - } - } - } - } - } - ], - "sources": [] -} -EOF - -# Test Reduce::Basic (global aggregate). -query T multiline -EXPLAIN PHYSICAL PLAN AS JSON FOR -SELECT - STRING_AGG(b::text || '1', ','), - STRING_AGG(b::text || '2', ',') -FROM t ----- -{ - "plans": [ - { - "id": "Explained Query", - "plan": { - "lir_id": 15, - "node": { - "Let": { - "id": 0, - "value": { - "lir_id": 1, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": { - "Arrangement": [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - null, - { - "expressions": [], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 2 - } - ] + "input_arity": 2 } } } }, "body": { - "lir_id": 14, + "lir_id": 10, "node": { - "Let": { - "id": 1, - "value": { - "lir_id": 6, - "node": { - "Join": { - "inputs": [ - { + "Union": { + "inputs": [ + { + "lir_id": 9, + "node": { + "ArrangeBy": { + "input": { "lir_id": 3, "node": { - "Reduce": { - "input": { - "lir_id": 2, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } + "Get": { + "id": { + "Local": 0 }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - 
"exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - } - ], - "predicates": [], - "projection": [ + "keys": { + "raw": false, + "arranged": [ + [ + [], + [ + 0, 1 ], - "input_arity": 1 - } - } + [ + 0, + 1 + ] + ] + ], + "types": null }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } + "plan": "PassArrangements" } } }, - { - "lir_id": 5, + "forms": { + "raw": true, + "arranged": [], + "types": null + }, + "input_key": [], + "input_mfp": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1 + ], + "input_arity": 2 + } + } + } + }, + { + 
"lir_id": 8, + "node": { + "Mfp": { + "input": { + "lir_id": 7, "node": { - "Reduce": { - "input": { - "lir_id": 4, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } + "Union": { + "inputs": [ + { + "lir_id": 5, + "node": { + "Negate": { + "input": { + "lir_id": 4, + "node": { + "Get": { + "id": { + "Local": 0 + }, + "keys": { + "raw": false, + "arranged": [ + [ + [], + [ + 0, + 1 + ], + [ + 0, + 1 + ] ] - } + ], + "types": null + }, + "plan": { + "Arrangement": [ + [], + null, + { + "expressions": [], + "predicates": [], + "projection": [], + "input_arity": 2 + } + ] } - ] + } } } - ], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 1 + } } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ + }, + { + "lir_id": 6, + "node": { + "Constant": { + "rows": { + "Ok": [ + [ { - "CallVariadic": { - 
"func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } + "data": [] + }, + 0, + 1 ] - } - }, - "distinct": false - }, - "fused_unnest_list": false + ] + } + } } } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } + ], + "consolidate_output": true } } - } - ], - "plan": { - "Linear": { - "source_relation": 0, - "source_key": [], - "initial_closure": null, - "stage_plans": [ + }, + "mfp": { + "expressions": [ { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 + "Literal": [ + { + "Ok": { + "data": [ + 0 + ] + } + }, + { + "scalar_type": "String", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 + ] } + }, + { + "scalar_type": "String", + "nullable": true } - } + ] } ], - "final_closure": null - } + "predicates": [], + "projection": [ + 0, + 1 + ], + "input_arity": 0 + }, + "input_key_val": null } } } + ], + "consolidate_output": false + } + } + } + } + } + } + } + ], + "sources": [] +} +EOF + +# Test Reduce::Collated (with GROUP BY). 
+query T multiline +EXPLAIN PHYSICAL PLAN AS JSON FOR +MATERIALIZED VIEW collated_group_by_mv +---- +{ + "plans": [ + { + "id": "materialize.public.collated_group_by_mv", + "plan": { + "lir_id": 2, + "node": { + "Reduce": { + "input": { + "lir_id": 1, + "node": { + "Get": { + "id": { + "Global": { + "User": 1 + } }, - "body": { - "lir_id": 13, - "node": { - "Union": { - "inputs": [ + "keys": { + "raw": false, + "arranged": [ + [ + [ { - "lir_id": 7, - "node": { - "Get": { - "id": { - "Local": 1 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, + "Column": [ + 0, + "a" + ] + } + ], + [ + 0, + 1 + ], + [ + 1 + ] + ] + ], + "types": [ + { + "scalar_type": "Int32", + "nullable": true + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + "plan": "PassArrangements" + } + } + }, + "key_val_plan": { + "key_plan": { + "mfp": { + "expressions": [], + "predicates": [], + "projection": [ + 0 + ], + "input_arity": 2 + } + }, + "val_plan": { + "mfp": { + "expressions": [ + { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] + } + } + }, + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ { - "lir_id": 12, - "node": { - "Mfp": { - "input": { - "lir_id": 11, - "node": { - "Union": { - "inputs": [ - { - "lir_id": 9, - "node": { - "Negate": { - "input": { - "lir_id": 8, - "node": { - "Get": { - "id": { - "Local": 1 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": { - "Collection": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 2 - } - } - } - } - } - } - } - }, - { - "lir_id": 10, - "node": { - "Constant": { - "rows": { - "Ok": [ - [ - { - "data": [] - }, - 0, - 1 - ] - ] - } - } - } - } - ], - "consolidate_output": true - } - } - }, - "mfp": { - "expressions": [ - { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ 
+ "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 2, + null + ] + }, + "expr2": { "Literal": [ { "Ok": { "data": [ - 0 + 19, + 1, + 49 ] } }, { "scalar_type": "String", - "nullable": true + "nullable": false } ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } }, { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } + }, + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 2, + null + ] + }, + "expr2": { "Literal": [ { "Ok": { "data": [ - 0 + 19, + 1, + 50 ] } }, { "scalar_type": "String", - "nullable": true + "nullable": false } ] } - ], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 0 + } }, - "input_key_val": null - } + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] } } - ], - "consolidate_output": false + ] } } - } + ], + "predicates": [], + "projection": [ + 1, + 3, + 1, + 1, + 1, + 4 + ], + "input_arity": 2 } } - } - } - } - } - } - ], - "sources": [] -} -EOF - -# Test Reduce::Collated (with GROUP BY). 
-query T multiline -EXPLAIN PHYSICAL PLAN AS JSON FOR -MATERIALIZED VIEW collated_group_by_mv ----- -{ - "plans": [ - { - "id": "materialize.public.collated_group_by_mv", - "plan": { - "lir_id": 9, - "node": { - "Join": { - "inputs": [ - { - "lir_id": 2, - "node": { - "Reduce": { - "input": { - "lir_id": 1, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } + }, + "plan": { + "Collation": { + "accumulable": { + "full_aggrs": [ + { + "func": "Count", + "expr": { + "Column": [ + 1, + "b" + ] }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 1 - ], - "input_arity": 2 - } - } + "distinct": true }, - "plan": { - "Hierarchical": { - "Bucketed": { - "aggr_funcs": [ - "MinInt32", - "MaxInt32" - ], - "skips": [ - 0, - 0 - ], - "buckets": [ - 268435456, - 16777216, - 1048576, - 65536, - 4096, - 256, - 16 + { + "func": "SumInt32", + "expr": { + "Column": [ + 1, + "b" + ] + }, + "distinct": false + } + ], + "simple_aggrs": [ + [ + 1, + 4, + { + "func": "SumInt32", + "expr": { + "Column": [ + 1, + "b" ] - } + }, + "distinct": false } - }, - "input_key": [ + ] + ], + "distinct_aggrs": [ + [ + 0, + 0, { - "Column": [ - 0, - "a" - ] + "func": "Count", + "expr": { + "Column": [ + 1, + "b" + ] + }, + "distinct": true } + ] + ] + }, + "hierarchical": { + "Bucketed": { + "aggr_funcs": [ + "MinInt32", + "MaxInt32" ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } + "skips": [ + 2, + 0 + ], + "buckets": [ + 268435456, + 
16777216, + 1048576, + 65536, + 4096, + 256, + 16 + ] } - } - }, - { - "lir_id": 4, - "node": { - "Reduce": { - "input": { - "lir_id": 3, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 + }, + "basic": { + "Multiple": [ + [ + 1, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, + } + }, + "exprs": [ { - "scalar_type": "Int32", - "nullable": true + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] + } + } + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } } ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } - }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 1 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Accumulable": { - "full_aggrs": [ - { - "func": "Count", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": true - }, - { - "func": "SumInt32", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": false } - ], - "simple_aggrs": [ - [ - 1, - 1, - { - "func": "SumInt32", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": false - } - ] - ], - "distinct_aggrs": [ - [ - 0, - 0, - { 
- "func": "Count", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": true - } - ] - ] + }, + "distinct": false } - }, - "input_key": [ + ], + [ + 5, { - "Column": [ - 0, - "a" - ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - } - }, - { - "lir_id": 6, - "node": { - "Reduce": { - "input": { - "lir_id": 5, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + "expr": { + "Column": [ + 1, + "b" ] } } }, - { + "expr2": { "Literal": [ { "Ok": { "data": [ 19, 1, - 
44 + 50 ] } }, @@ -5015,361 +4845,446 @@ MATERIALIZED VIEW collated_group_by_mv } ] } - ] - } - } - ] - } - } - ], - "predicates": [], - "projection": [ - 2 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ + } + }, + { + "Literal": [ { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } + "Ok": { + "data": [ + 19, + 1, + 44 + ] } }, { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] + "scalar_type": "String", + "nullable": false } ] } - } - ] + ] + } } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] + ] + } + }, + "distinct": false } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 + ] + ] + }, + "aggregate_types": [ + "Accumulable", + "Basic", + "Hierarchical", + "Hierarchical", + "Accumulable", + "Basic" + ] + } + }, + "input_key": [ + { + "Column": [ + 0, + "a" + ] + } + ], + "mfp_after": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6 + ], + "input_arity": 7 + } + } + } + } + } + ], + "sources": [] +} +EOF + +# Test Reduce::Collated (with GROUP BY, one-shot). 
+query T multiline +EXPLAIN PHYSICAL PLAN AS JSON FOR +SELECT * FROM collated_group_by +---- +{ + "plans": [ + { + "id": "Explained Query", + "plan": { + "lir_id": 2, + "node": { + "Reduce": { + "input": { + "lir_id": 1, + "node": { + "Get": { + "id": { + "Global": { + "User": 1 } - } + }, + "keys": { + "raw": false, + "arranged": [ + [ + [ + { + "Column": [ + 0, + "a" + ] + } + ], + [ + 0, + 1 + ], + [ + 1 + ] + ] + ], + "types": [ + { + "scalar_type": "Int32", + "nullable": true + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + "plan": "PassArrangements" + } + } + }, + "key_val_plan": { + "key_plan": { + "mfp": { + "expressions": [], + "predicates": [], + "projection": [ + 0 + ], + "input_arity": 2 } }, - { - "lir_id": 8, - "node": { - "Reduce": { - "input": { - "lir_id": 7, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": "PassArrangements" + "val_plan": { + "mfp": { + "expressions": [ + { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] } } }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": 
"TextConcat", + "expr1": { + "Column": [ + 2, + null + ] + }, + "expr2": { + "Literal": [ { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } + "Ok": { + "data": [ + 19, + 1, + 49 + ] } }, { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] + "scalar_type": "String", + "nullable": false } ] } } - ] - } - } - ], - "predicates": [], - "projection": [ - 2 - ], - "input_arity": 2 - } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] } }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 2, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } + } + ], + "predicates": [], + "projection": [ + 1, + 3, + 1, + 1, + 1, + 4 + ], + "input_arity": 2 + } + } + }, + "plan": { + "Collation": { + "accumulable": { + "full_aggrs": [ + { + "func": "Count", + "expr": { + "Column": [ + 1, + "b" + ] + }, + "distinct": true + }, + { + "func": "SumInt32", + "expr": { + "Column": [ + 1, + "b" + ] + }, + "distinct": false + } + ], + "simple_aggrs": [ + [ + 1, + 4, + { 
+ "func": "SumInt32", + "expr": { + "Column": [ + 1, + "b" + ] + }, + "distinct": false + } + ] + ], + "distinct_aggrs": [ + [ + 0, + 0, + { + "func": "Count", + "expr": { + "Column": [ + 1, + "b" + ] + }, + "distinct": true + } + ] + ] + }, + "hierarchical": { + "Monotonic": { + "aggr_funcs": [ + "MinInt32", + "MaxInt32" + ], + "skips": [ + 2, + 0 + ], + "must_consolidate": true + } + }, + "basic": { + "Multiple": [ + [ + 1, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { "func": { - "StringAgg": { - "order_by": [] + "RecordCreate": { + "field_names": [ + "" + ] } }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + "expr": { + "Column": [ + 1, + "b" ] } } }, - { + "expr2": { "Literal": [ { "Ok": { "data": [ 19, 1, - 44 + 49 ] } }, @@ -5379,3499 +5294,986 @@ MATERIALIZED VIEW collated_group_by_mv } ] } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } ] } - } - ] + ] + } } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] + ] + } + }, + "distinct": false } ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ 
- 0, - 1 - ], - "input_arity": 2 - } - } - } - } - ], - "plan": { - "Delta": { - "path_plans": [ - { - "source_relation": 0, - "source_key": [ + [ + 5, { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 0, - null - ] + "func": { + "StringAgg": { + "order_by": [] } - ], - "stream_thinning": [ - 1, - 2 - ], - "lookup_key": [ - { - "Column": [ - 0, - null + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 1, + "b" + ] + } + } + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } ] } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ + }, + "distinct": false + } + ] + ] + }, + "aggregate_types": [ + "Accumulable", + "Basic", + "Hierarchical", + "Hierarchical", + "Accumulable", + "Basic" + ] + } + }, + "input_key": [ + { + "Column": [ + 0, + "a" + ] + } + ], + "mfp_after": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6 + ], + "input_arity": 7 + } + } + } + } + } + ], + "sources": [] +} +EOF + +# Test Reduce::Collated (global aggregate). 
+query T multiline +EXPLAIN PHYSICAL PLAN AS JSON FOR +MATERIALIZED VIEW collated_global_mv +---- +{ + "plans": [ + { + "id": "materialize.public.collated_global_mv", + "plan": { + "lir_id": 11, + "node": { + "Let": { + "id": 0, + "value": { + "lir_id": 2, + "node": { + "Reduce": { + "input": { + "lir_id": 1, + "node": { + "Get": { + "id": { + "Global": { + "User": 1 + } + }, + "keys": { + "raw": false, + "arranged": [ + [ + [ + { + "Column": [ + 0, + "a" + ] + } + ], + [ 0, - 1, - 2, - 3, - 4 + 1 ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2, - 3, - 4 - ], - "lookup_key": [ - { - "Column": [ - 0, - null + [ + 1 + ] ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { + ], + "types": [ + { + "scalar_type": "Int32", + "nullable": true + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + "plan": { + "Arrangement": [ + [ + { + "Column": [ + 0, + "a" + ] + } + ], + null, + { "expressions": [], "predicates": [], "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 + 1 ], - "input_arity": 6 + "input_arity": 2 } - } + ] } - }, - { - "lookup_relation": 3, - "stream_key": [ + } + } + }, + "key_val_plan": { + "key_plan": { + "mfp": { + "expressions": [], + "predicates": [], + "projection": [], + "input_arity": 1 + } + }, + "val_plan": { + "mfp": { + "expressions": [ { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2, - 3, - 4, - 5 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 3, - 5, - 1, - 2, - 4, - 6 - ], - "input_arity": 7 + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 0, + "b" + ] + } } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 1, - "source_key": [ - { - "Column": [ - 0, - null - ] 
- } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2 - ], - "lookup_key": [ + }, { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 3, - 4, - 0, - 1, - 2 - ], - "input_arity": 5 + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 1, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [ + }, { - "Column": [ - 3, - null - ] + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 1, + null + ] + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] + } + } + ] + } } ], - "stream_thinning": [ + "predicates": [], + "projection": [ 
0, - 1, 2, - 4, - 5 + 0, + 0, + 0, + 3 ], - "lookup_key": [ + "input_arity": 1 + } + } + }, + "plan": { + "Collation": { + "accumulable": { + "full_aggrs": [ { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 3, + "func": "Count", + "expr": { + "Column": [ 0, - 4, - 5, - 6 - ], - "input_arity": 7 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [ + "b" + ] + }, + "distinct": true + }, { - "Column": [ - 3, - null - ] + "func": "SumInt32", + "expr": { + "Column": [ + 0, + "b" + ] + }, + "distinct": false } ], - "stream_thinning": [ - 0, - 1, - 2, - 4, - 5, - 6 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 4, - 6, - 2, - 3, - 5, - 7 - ], - "input_arity": 8 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 2, - "source_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 2, - 3, - 0, - 1 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 3, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 4 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, 
- 2, - 3, - 5, - 6, - 0, - 4 - ], - "input_arity": 7 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [ - { - "Column": [ - 5, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4, - 6 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 4, - 6, - 2, - 3, - 5, - 7 - ], - "input_arity": 8 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 3, - "source_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 2, - 3, - 0, - 1 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 3, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 4 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 3, - 5, - 6, - 0, - 4 - ], - "input_arity": 7 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [ - { - "Column": [ - 5, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4, - 6 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 4, - 7, - 2, - 3, - 5, - 6 - ], - "input_arity": 8 - } - } - } - } - ], - 
"final_closure": null - } - ] - } - } - } - } - } - } - ], - "sources": [] -} -EOF - -# Test Reduce::Collated (with GROUP BY, one-shot). -query T multiline -EXPLAIN PHYSICAL PLAN AS JSON FOR -SELECT * FROM collated_group_by ----- -{ - "plans": [ - { - "id": "Explained Query", - "plan": { - "lir_id": 9, - "node": { - "Join": { - "inputs": [ - { - "lir_id": 2, - "node": { - "Reduce": { - "input": { - "lir_id": 1, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } - }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 1 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Hierarchical": { - "Monotonic": { - "aggr_funcs": [ - "MinInt32", - "MaxInt32" - ], - "skips": [ - 0, - 0 - ], - "must_consolidate": true - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - } - }, - { - "lir_id": 4, - "node": { - "Reduce": { - "input": { - "lir_id": 3, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - 
} - }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 1 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Accumulable": { - "full_aggrs": [ - { - "func": "Count", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": true - }, - { - "func": "SumInt32", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": false - } - ], - "simple_aggrs": [ - [ - 1, - 1, - { - "func": "SumInt32", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": false - } - ] - ], - "distinct_aggrs": [ - [ - 0, - 0, - { - "func": "Count", - "expr": { - "Column": [ - 1, - "b" - ] - }, - "distinct": true - } - ] - ] - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - } - }, - { - "lir_id": 6, - "node": { - "Reduce": { - "input": { - "lir_id": 5, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": 
"String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - } - ], - "predicates": [], - "projection": [ - 2 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - } - }, - { - "lir_id": 8, - "node": { - "Reduce": { - "input": { - "lir_id": 7, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 2 - } - }, - "val_plan": { - 
"mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - } - ], - "predicates": [], - "projection": [ - 2 - ], - "input_arity": 2 - } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 1, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": [ - { - "Column": [ - 0, - "a" - ] - } - ], - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - } - } - ], - "plan": { - "Delta": { - "path_plans": [ - { - "source_relation": 
0, - "source_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2, - 3, - 4 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2, - 3, - 4, - 5 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 3, - 5, - 1, - 2, - 4, - 6 - ], - "input_arity": 7 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 1, - "source_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2 - ], - "input_arity": 3 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1, - 2 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": 
[], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 3, - 4, - 0, - 1, - 2 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [ - { - "Column": [ - 3, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 4, - 5 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 3, - 0, - 4, - 5, - 6 - ], - "input_arity": 7 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [ - { - "Column": [ - 3, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 4, - 5, - 6 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 4, - 6, - 2, - 3, - 5, - 7 - ], - "input_arity": 8 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 2, - "source_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 2, - 3, - 0, - 1 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 3, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 4 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 
2, - 3, - 5, - 6, - 0, - 4 - ], - "input_arity": 7 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [ - { - "Column": [ - 5, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4, - 6 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 4, - 6, - 2, - 3, - 5, - 7 - ], - "input_arity": 8 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 3, - "source_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "stream_thinning": [ - 1 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 2, - 3, - 0, - 1 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [ - { - "Column": [ - 3, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 4 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 3, - 5, - 6, - 0, - 4 - ], - "input_arity": 7 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [ - { - "Column": [ - 5, - null - ] - } - ], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4, - 6 - ], - "lookup_key": [ - { - "Column": [ - 0, - null - ] - } - ], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 4, - 7, - 2, - 3, - 5, - 6 - ], - "input_arity": 8 + "simple_aggrs": [ + [ + 1, + 4, + 
{ + "func": "SumInt32", + "expr": { + "Column": [ + 0, + "b" + ] + }, + "distinct": false } - } - } - } - ], - "final_closure": null - } - ] - } - } - } - } - } - } - ], - "sources": [] -} -EOF - -# Test Reduce::Collated (global aggregate). -query T multiline -EXPLAIN PHYSICAL PLAN AS JSON FOR -MATERIALIZED VIEW collated_global_mv ----- -{ - "plans": [ - { - "id": "materialize.public.collated_global_mv", - "plan": { - "lir_id": 19, - "node": { - "Let": { - "id": 0, - "value": { - "lir_id": 1, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": { - "Arrangement": [ - [ - { - "Column": [ - 0, - "a" ] - } - ], - null, - { - "expressions": [], - "predicates": [], - "projection": [ - 1 ], - "input_arity": 2 - } - ] - } - } - } - }, - "body": { - "lir_id": 18, - "node": { - "Let": { - "id": 1, - "value": { - "lir_id": 10, - "node": { - "Join": { - "inputs": [ - { - "lir_id": 3, - "node": { - "Reduce": { - "input": { - "lir_id": 2, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 0 - ], - "input_arity": 1 - } - } - }, - "plan": { - "Hierarchical": { - "Bucketed": { - "aggr_funcs": [ - "MinInt32", - "MaxInt32" - ], - "skips": [ - 0, - 0 - ], - "buckets": [ - 268435456, - 16777216, - 1048576, - 65536, - 4096, - 256, - 16 - ] - } - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - 
"input_arity": 2 - } - } + "distinct_aggrs": [ + [ + 0, + 0, + { + "func": "Count", + "expr": { + "Column": [ + 0, + "b" + ] + }, + "distinct": true } - }, - { - "lir_id": 5, - "node": { - "Reduce": { - "input": { - "lir_id": 4, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 0 - ], - "input_arity": 1 - } - } - }, - "plan": { - "Accumulable": { - "full_aggrs": [ - { - "func": "Count", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": true - }, - { - "func": "SumInt32", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": false - } - ], - "simple_aggrs": [ - [ - 1, - 1, - { - "func": "SumInt32", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": false - } - ] - ], - "distinct_aggrs": [ - [ - 0, - 0, - { - "func": "Count", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": true - } - ] - ] - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 + ] + ] + }, + "hierarchical": { + "Bucketed": { + "aggr_funcs": [ + "MinInt32", + "MaxInt32" + ], + "skips": [ + 2, + 0 + ], + "buckets": [ + 268435456, + 16777216, + 1048576, + 65536, + 4096, + 256, + 16 + ] + } + }, + "basic": { + "Multiple": [ + [ + 1, + { + "func": { + "StringAgg": { + "order_by": [] } - } - } - }, - { - "lir_id": 7, - "node": { - "Reduce": { - "input": { - "lir_id": 6, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 + }, 
+ "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] } }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] - } - } - ], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 1 - } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { + "exprs": [ + { + "CallVariadic": { "func": { - "StringAgg": { - "order_by": [] + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] } }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } + "CastInt32ToString": null }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + "expr": { + "Column": [ + 0, + "b" 
+ ] + } + } + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 ] } - ] - } - } - ] - } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } - } - } - }, - { - "lir_id": 9, - "node": { - "Reduce": { - "input": { - "lir_id": 8, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" + }, + { + "scalar_type": "String", + "nullable": false + } ] } - }, - "exprs": [ + } + }, + { + "Literal": [ { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } + "Ok": { + "data": [ + 19, + 1, + 44 ] } + }, + { + "scalar_type": "String", + "nullable": false } ] } - } - ], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 1 + ] + } + } + ] + } + }, + "distinct": false + } + ], + [ + 5, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { + }, + "exprs": 
[ + { + "CallVariadic": { "func": { - "StringAgg": { - "order_by": [] + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] } }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } + "CastInt32ToString": null }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } + "expr": { + "Column": [ + 0, + "b" + ] + } + } + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 ] } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 ] } + }, + { + "scalar_type": "String", + "nullable": false } ] } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } - } - } - } - ], - "plan": { - "Delta": { - "path_plans": [ - { - "source_relation": 0, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0, - 1 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - 
"projection": [ - 0, - 1, - 2, - 3 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 1, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [], - "stream_thinning": [ - 0, - 1 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 2, - 3, - 0, - 1 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 2, - "source_key": [], - "initial_closure": 
{ - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [], - "stream_thinning": [ - 0 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 0 - ], - "input_arity": 3 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 3, - 4, - 2 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } - } - } - } - ], - "final_closure": null - }, - { - "source_relation": 3, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [], - "stream_thinning": [ - 0 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 0 - ], - "input_arity": 3 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 3, - 4, - 2 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [], - 
"stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 5, - 4 - ], - "input_arity": 6 - } + ] } } - } - ], - "final_closure": null - } - ] - } - } - } + ] + } + }, + "distinct": false + } + ] + ] + }, + "aggregate_types": [ + "Accumulable", + "Basic", + "Hierarchical", + "Hierarchical", + "Accumulable", + "Basic" + ] } }, - "body": { - "lir_id": 17, - "node": { - "Union": { - "inputs": [ - { - "lir_id": 11, + "input_key": null, + "mfp_after": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "input_arity": 6 + } + } + } + }, + "body": { + "lir_id": 10, + "node": { + "Union": { + "inputs": [ + { + "lir_id": 9, + "node": { + "ArrangeBy": { + "input": { + "lir_id": 3, "node": { "Get": { "id": { - "Local": 1 + "Local": 0 }, "keys": { - "raw": true, - "arranged": [], + "raw": false, + "arranged": [ + [ + [], + [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + [ + 0, + 1, + 2, + 3, + 4, + 5 + ] + ] + ], "types": null }, - "plan": { - "Collection": { - "expressions": [], - "predicates": [], - "projection": [ - 2, - 4, - 0, - 1, - 3, - 5 - ], - "input_arity": 6 - } - } + "plan": "PassArrangements" } } }, - { - "lir_id": 16, + "forms": { + "raw": true, + "arranged": [], + "types": null + }, + "input_key": [], + "input_mfp": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "input_arity": 6 + } + } + } + }, + { + "lir_id": 8, + "node": { + "Mfp": { + "input": { + "lir_id": 7, "node": { - "Mfp": { - "input": { - "lir_id": 15, - "node": { - "Union": { - "inputs": [ - { - "lir_id": 13, - "node": { - "Negate": { - "input": { - "lir_id": 12, - "node": { - "Get": { - "id": { - "Local": 1 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": { - "Collection": { - "expressions": [], - "predicates": [], - 
"projection": [], - "input_arity": 6 - } - } - } - } - } - } - } - }, - { - "lir_id": 14, + "Union": { + "inputs": [ + { + "lir_id": 5, + "node": { + "Negate": { + "input": { + "lir_id": 4, "node": { - "Constant": { - "rows": { - "Ok": [ + "Get": { + "id": { + "Local": 0 + }, + "keys": { + "raw": false, + "arranged": [ [ - { - "data": [] - }, - 0, - 1 + [], + [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + [ + 0, + 1, + 2, + 3, + 4, + 5 + ] ] + ], + "types": null + }, + "plan": { + "Arrangement": [ + [], + null, + { + "expressions": [], + "predicates": [], + "projection": [], + "input_arity": 6 + } ] } } } } - ], - "consolidate_output": true + } } - } - }, - "mfp": { - "expressions": [ - { - "Literal": [ - { - "Ok": { - "data": [ - 49 + }, + { + "lir_id": 6, + "node": { + "Constant": { + "rows": { + "Ok": [ + [ + { + "data": [] + }, + 0, + 1 ] - } - }, - { - "scalar_type": "Int64", - "nullable": false + ] } + } + } + } + ], + "consolidate_output": true + } + } + }, + "mfp": { + "expressions": [ + { + "Literal": [ + { + "Ok": { + "data": [ + 49 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "String", - "nullable": true - } + } + }, + { + "scalar_type": "Int64", + "nullable": false + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "Int32", - "nullable": true - } + } + }, + { + "scalar_type": "String", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "Int32", - "nullable": true - } + } + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "Int64", - "nullable": true - } + } + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + { + "Literal": [ + { + 
"Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "String", - "nullable": true - } + } + }, + { + "scalar_type": "Int64", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] } - ], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 0 - }, - "input_key_val": null + }, + { + "scalar_type": "String", + "nullable": true + } + ] } - } - } - ], - "consolidate_output": false + ], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "input_arity": 0 + }, + "input_key_val": null + } } } - } + ], + "consolidate_output": false } } } @@ -8894,1348 +6296,807 @@ SELECT * FROM collated_global { "id": "Explained Query", "plan": { - "lir_id": 19, + "lir_id": 11, "node": { "Let": { "id": 0, "value": { - "lir_id": 1, - "node": { - "Get": { - "id": { - "Global": { - "User": 1 - } - }, - "keys": { - "raw": false, - "arranged": [ - [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - [ - 0, - 1 - ], - [ - 1 - ] - ] - ], - "types": [ - { - "scalar_type": "Int32", - "nullable": true - }, - { - "scalar_type": "Int32", - "nullable": true - } - ] - }, - "plan": { - "Arrangement": [ - [ - { - "Column": [ - 0, - "a" - ] - } - ], - null, - { - "expressions": [], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 2 - } - ] - } - } - } - }, - "body": { - "lir_id": 18, + "lir_id": 2, "node": { - "Let": { - "id": 1, - "value": { - "lir_id": 10, + "Reduce": { + "input": { + "lir_id": 1, "node": { - "Join": { - "inputs": [ - { - "lir_id": 3, - "node": { - "Reduce": { - "input": { - "lir_id": 2, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [], - 
"predicates": [], - "projection": [ - 0, - 0 - ], - "input_arity": 1 - } - } - }, - "plan": { - "Hierarchical": { - "Monotonic": { - "aggr_funcs": [ - "MinInt32", - "MaxInt32" - ], - "skips": [ - 0, - 0 - ], - "must_consolidate": true - } - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ + "Get": { + "id": { + "Global": { + "User": 1 + } + }, + "keys": { + "raw": false, + "arranged": [ + [ + [ + { + "Column": [ 0, - 1 - ], - "input_arity": 2 + "a" + ] } + ], + [ + 0, + 1 + ], + [ + 1 + ] + ] + ], + "types": [ + { + "scalar_type": "Int32", + "nullable": true + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + "plan": { + "Arrangement": [ + [ + { + "Column": [ + 0, + "a" + ] + } + ], + null, + { + "expressions": [], + "predicates": [], + "projection": [ + 1 + ], + "input_arity": 2 + } + ] + } + } + } + }, + "key_val_plan": { + "key_plan": { + "mfp": { + "expressions": [], + "predicates": [], + "projection": [], + "input_arity": 1 + } + }, + "val_plan": { + "mfp": { + "expressions": [ + { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 0, + "b" + ] } } }, { - "lir_id": 5, - "node": { - "Reduce": { - "input": { - "lir_id": 4, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 0 - ], - "input_arity": 1 - } - } - }, - "plan": { - "Accumulable": { - "full_aggrs": [ - { - "func": "Count", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": true - }, - { - "func": "SumInt32", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": false + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } 
+ }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] } - ], - "simple_aggrs": [ - [ - 1, - 1, - { - "func": "SumInt32", - "expr": { - "Column": [ - 0, - "b" - ] - }, - "distinct": false - } - ] - ], - "distinct_aggrs": [ - [ - 0, - 0, - { - "func": "Count", - "expr": { + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { "Column": [ - 0, - "b" + 1, + null ] }, - "distinct": true - } - ] - ] - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - } - }, - { - "lir_id": 7, - "node": { - "Reduce": { - "input": { - "lir_id": 6, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ + "expr2": { + "Literal": [ { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } + "Ok": { + "data": [ + 19, + 1, + 49 ] } - } - ] - } - } - ], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 1 - } - } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - 
"StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ + }, { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 49 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } + "scalar_type": "String", + "nullable": false } ] } - }, - "distinct": false - }, - "fused_unnest_list": false - } - } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } - } - } - }, - { - "lir_id": 9, - "node": { - "Reduce": { - "input": { - "lir_id": 8, - "node": { - "Get": { - "id": { - "Local": 0 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null + } }, - "plan": "PassArrangements" - } - } - }, - "key_val_plan": { - "key_plan": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 1 - } - }, - "val_plan": { - "mfp": { - "expressions": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ - { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - 
"Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - ] - } - } - ] + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false } - } - ], - "predicates": [], - "projection": [ - 1 - ], - "input_arity": 1 - } + ] + } + ] } - }, - "plan": { - "Basic": { - "Single": { - "index": 0, - "expr": { - "func": { - "StringAgg": { - "order_by": [] - } - }, - "expr": { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "" - ] - } - }, - "exprs": [ + } + ] + } + }, + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] + } + }, + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "Column": [ + 1, + null + ] + }, + "expr2": { + "Literal": [ { - "CallVariadic": { - "func": { - "RecordCreate": { - "field_names": [ - "value", - "sep" - ] - } - }, - "exprs": [ - { - "CallBinary": { - "func": "TextConcat", - "expr1": { - "CallUnary": { - "func": { - "CastInt32ToString": null - }, - "expr": { - "Column": [ - 0, - "b" - ] - } - } - }, - "expr2": { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 50 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } - } - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 19, - 1, - 44 - ] - } - }, - { - "scalar_type": "String", - "nullable": false - } - ] - } + "Ok": { + "data": [ + 19, + 1, + 50 ] } + }, + { + "scalar_type": "String", + "nullable": false } ] } - }, - "distinct": false + } }, - "fused_unnest_list": false - } + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] } - }, - "input_key": null, - "mfp_after": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 } 
- } + ] } } ], - "plan": { - "Delta": { - "path_plans": [ - { - "source_relation": 0, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0, - 1 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4 - ], - "input_arity": 5 - } - } - } - }, - { - "lookup_relation": 3, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } - } - } - } - ], - "final_closure": null + "predicates": [], + "projection": [ + 0, + 2, + 0, + 0, + 0, + 3 + ], + "input_arity": 1 + } + } + }, + "plan": { + "Collation": { + "accumulable": { + "full_aggrs": [ + { + "func": "Count", + "expr": { + "Column": [ + 0, + "b" + ] + }, + "distinct": true + }, + { + "func": "SumInt32", + "expr": { + "Column": [ + 0, + "b" + ] + }, + "distinct": false + } + ], + "simple_aggrs": [ + [ + 1, + 4, + { + "func": "SumInt32", + "expr": { + "Column": [ + 0, + "b" + ] }, - { - "source_relation": 1, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1 - ], - "input_arity": 2 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - 
"stream_key": [], - "stream_thinning": [ - 0, - 1 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 2, - 3, - 0, - 1 - ], - "input_arity": 4 - } - } - } - }, - { - "lookup_relation": 2, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4 - ], - "input_arity": 5 - } - } + "distinct": false + } + ] + ], + "distinct_aggrs": [ + [ + 0, + 0, + { + "func": "Count", + "expr": { + "Column": [ + 0, + "b" + ] + }, + "distinct": true + } + ] + ] + }, + "hierarchical": { + "Monotonic": { + "aggr_funcs": [ + "MinInt32", + "MaxInt32" + ], + "skips": [ + 2, + 0 + ], + "must_consolidate": true + } + }, + "basic": { + "Multiple": [ + [ + 1, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] } }, - { - "lookup_relation": 3, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 0, + "b" + ] + } + } + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 49 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] } } - } - ], 
- "final_closure": null + ] + } }, - { - "source_relation": 2, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 - ], - "input_arity": 1 - } - } - }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [], - "stream_thinning": [ - 0 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 0 - ], - "input_arity": 3 - } - } - } - }, - { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 3, - 4, - 2 - ], - "input_arity": 5 - } - } + "distinct": false + } + ], + [ + 5, + { + "func": { + "StringAgg": { + "order_by": [] + } + }, + "expr": { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "" + ] } }, - { - "lookup_relation": 3, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 6 - } + "exprs": [ + { + "CallVariadic": { + "func": { + "RecordCreate": { + "field_names": [ + "value", + "sep" + ] + } + }, + "exprs": [ + { + "CallBinary": { + "func": "TextConcat", + "expr1": { + "CallUnary": { + "func": { + "CastInt32ToString": null + }, + "expr": { + "Column": [ + 0, + "b" + ] + } + } + }, + "expr2": { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 50 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + } + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 19, + 1, + 44 + ] + } + }, + { + "scalar_type": "String", + "nullable": false + } + ] + } + ] } } - } - ], - "final_closure": null + ] + } 
}, - { - "source_relation": 3, - "source_key": [], - "initial_closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0 + "distinct": false + } + ] + ] + }, + "aggregate_types": [ + "Accumulable", + "Basic", + "Hierarchical", + "Hierarchical", + "Accumulable", + "Basic" + ] + } + }, + "input_key": null, + "mfp_after": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "input_arity": 6 + } + } + } + }, + "body": { + "lir_id": 10, + "node": { + "Union": { + "inputs": [ + { + "lir_id": 9, + "node": { + "ArrangeBy": { + "input": { + "lir_id": 3, + "node": { + "Get": { + "id": { + "Local": 0 + }, + "keys": { + "raw": false, + "arranged": [ + [ + [], + [ + 0, + 1, + 2, + 3, + 4, + 5 ], - "input_arity": 1 - } - } + [ + 0, + 1, + 2, + 3, + 4, + 5 + ] + ] + ], + "types": null }, - "stage_plans": [ - { - "lookup_relation": 0, - "stream_key": [], - "stream_thinning": [ - 0 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 1, - 2, - 0 - ], - "input_arity": 3 - } - } - } - }, + "plan": "PassArrangements" + } + } + }, + "forms": { + "raw": true, + "arranged": [], + "types": null + }, + "input_key": [], + "input_mfp": { + "expressions": [], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "input_arity": 6 + } + } + } + }, + { + "lir_id": 8, + "node": { + "Mfp": { + "input": { + "lir_id": 7, + "node": { + "Union": { + "inputs": [ { - "lookup_relation": 1, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 3, - 4, - 2 - ], - "input_arity": 5 + "lir_id": 5, + "node": { + "Negate": { + "input": { + "lir_id": 4, + "node": { + "Get": { + "id": { + "Local": 0 + }, + "keys": { + 
"raw": false, + "arranged": [ + [ + [], + [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + [ + 0, + 1, + 2, + 3, + 4, + 5 + ] + ] + ], + "types": null + }, + "plan": { + "Arrangement": [ + [], + null, + { + "expressions": [], + "predicates": [], + "projection": [], + "input_arity": 6 + } + ] + } + } + } } } } }, { - "lookup_relation": 2, - "stream_key": [], - "stream_thinning": [ - 0, - 1, - 2, - 3, - 4 - ], - "lookup_key": [], - "closure": { - "ready_equivalences": [], - "before": { - "mfp": { - "expressions": [], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 5, - 4 - ], - "input_arity": 6 + "lir_id": 6, + "node": { + "Constant": { + "rows": { + "Ok": [ + [ + { + "data": [] + }, + 0, + 1 + ] + ] } } } } ], - "final_closure": null - } - ] - } - } - } - } - }, - "body": { - "lir_id": 17, - "node": { - "Union": { - "inputs": [ - { - "lir_id": 11, - "node": { - "Get": { - "id": { - "Local": 1 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": { - "Collection": { - "expressions": [], - "predicates": [], - "projection": [ - 2, - 4, - 0, - 1, - 3, - 5 - ], - "input_arity": 6 - } - } + "consolidate_output": true } } }, - { - "lir_id": 16, - "node": { - "Mfp": { - "input": { - "lir_id": 15, - "node": { - "Union": { - "inputs": [ - { - "lir_id": 13, - "node": { - "Negate": { - "input": { - "lir_id": 12, - "node": { - "Get": { - "id": { - "Local": 1 - }, - "keys": { - "raw": true, - "arranged": [], - "types": null - }, - "plan": { - "Collection": { - "expressions": [], - "predicates": [], - "projection": [], - "input_arity": 6 - } - } - } - } - } - } - } - }, - { - "lir_id": 14, - "node": { - "Constant": { - "rows": { - "Ok": [ - [ - { - "data": [] - }, - 0, - 1 - ] - ] - } - } - } - } - ], - "consolidate_output": true + "mfp": { + "expressions": [ + { + "Literal": [ + { + "Ok": { + "data": [ + 49 + ] } + }, + { + "scalar_type": "Int64", + "nullable": false } - }, - "mfp": { - "expressions": [ - { - "Literal": [ - { - "Ok": { - "data": [ - 49 
- ] - } - }, - { - "scalar_type": "Int64", - "nullable": false - } - ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "String", - "nullable": true - } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "Int32", - "nullable": true - } + } + }, + { + "scalar_type": "String", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "Int32", - "nullable": true - } + } + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "Int64", - "nullable": true - } + } + }, + { + "scalar_type": "Int32", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] - }, - { - "Literal": [ - { - "Ok": { - "data": [ - 0 - ] - } - }, - { - "scalar_type": "String", - "nullable": true - } + } + }, + { + "scalar_type": "Int64", + "nullable": true + } + ] + }, + { + "Literal": [ + { + "Ok": { + "data": [ + 0 ] } - ], - "predicates": [], - "projection": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "input_arity": 0 - }, - "input_key_val": null + }, + { + "scalar_type": "String", + "nullable": true + } + ] } - } - } - ], - "consolidate_output": false + ], + "predicates": [], + "projection": [ + 0, + 1, + 2, + 3, + 4, + 5 + ], + "input_arity": 0 + }, + "input_key_val": null + } } } - } + ], + "consolidate_output": false } } } diff --git a/test/sqllogictest/explain/physical_plan_as_text.slt b/test/sqllogictest/explain/physical_plan_as_text.slt index d4b8788f1eba2..64597944a07e6 100644 --- a/test/sqllogictest/explain/physical_plan_as_text.slt +++ b/test/sqllogictest/explain/physical_plan_as_text.slt @@ -696,35 +696,19 @@ FROM t GROUP BY a ---- Explained Query: - Join::Linear - 
linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "1"), ",")))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || "1"), ","))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "2"), ",")))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || "2"), ","))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] + Reduce::Basic + aggrs[0]=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "1"), ",")))) + aggrs[1]=(1, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "2"), ",")))) + val_plan + project=(#3, #4) + map=(integer_to_text(#1{b}), row(row((#2 || "1"), ",")), row(row((#2 || "2"), ","))) + key_plan + project=(#0) + input_key=#0{a} + Get::PassArrangements materialize.public.t + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Used Indexes: - materialize.public.t_a_idx (*** full scan ***) @@ -744,48 +728,38 @@ FROM t Explained Query: With cte l0 = - Get::Arrangement materialize.public.t - project=(#1) - key=#0{a} - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- cte l1 = - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=0, key=[] } - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "1"), ",")))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || "1"), ","))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "2"), ",")))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || "2"), ","))) - key_plan - project=() - Get::PassArrangements l0 - raw=true + Reduce::Basic + aggrs[0]=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "1"), ",")))) + aggrs[1]=(1, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "2"), ",")))) + val_plan + project=(#2, #3) + map=(integer_to_text(#0{b}), row(row((#1 || "1"), ",")), row(row((#1 || "2"), ","))) + key_plan + project=() + Get::Arrangement materialize.public.t + project=(#1) + key=#0{a} + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] 
Return Union - Get::PassArrangements l1 + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0, #1) } Mfp project=(#0, #1) map=(null, null) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0, #1) } Constant - () @@ -802,120 +776,28 @@ EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR MATERIALIZED VIEW collated_group_by_mv ---- materialize.public.collated_group_by_mv: - Join::Delta - plan_path[0] - delta_stage[2] - closure - project=(#0, #3, #5, #1, #2, #4, #6) - lookup={ relation=3, key=[#0] } - stream={ key=[#0], thinning=(#1..=#5) } - delta_stage[1] - lookup={ relation=2, key=[#0] } - stream={ key=[#0], thinning=(#1..=#4) } - delta_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=0, key=[#0] } - plan_path[1] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4..=#6) } - delta_stage[1] - closure - project=(#1..=#3, #0, #4..=#6) - lookup={ relation=2, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4, #5) } - delta_stage[0] - closure - project=(#0, #3, #4, #0..=#2) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=1, key=[#0] } - plan_path[2] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=2, key=[#0] } - plan_path[3] - delta_stage[2] - closure - project=(#1, #4, #7, #2, #3, #5, #6) - lookup={ 
relation=2, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=3, key=[#0] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#1{b})) + distinct_aggrs[0]=(0, 0, count(distinct #1{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] buckets=[268435456, 16777216, 1048576, 65536, 4096, 256, 16] - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#1{b})) - distinct_aggrs[0]=(0, 0, count(distinct #1{b})) - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "1"), ",")))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || "1"), ","))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "2"), ",")))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || "2"), ","))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
+ basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "1"), ",")))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "2"), ",")))) + val_plan + project=(#1, #3, #1, #1, #1, #4) + map=(integer_to_text(#1{b}), row(row((#2 || "1"), ",")), row(row((#2 || "2"), ","))) + key_plan + project=(#0) + input_key=#0{a} + Get::PassArrangements materialize.public.t + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Used Indexes: - materialize.public.t_a_idx (*** full scan ***) @@ -930,121 +812,29 @@ EXPLAIN PHYSICAL PLAN AS VERBOSE TEXT FOR SELECT * FROM collated_group_by ---- Explained Query: - Join::Delta - plan_path[0] - delta_stage[2] - closure - project=(#0, #3, #5, #1, #2, #4, #6) - lookup={ relation=3, key=[#0] } - stream={ key=[#0], thinning=(#1..=#5) } - delta_stage[1] - lookup={ relation=2, key=[#0] } - stream={ key=[#0], thinning=(#1..=#4) } - delta_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=0, key=[#0] } - plan_path[1] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4..=#6) } - delta_stage[1] - closure - project=(#1..=#3, #0, #4..=#6) - lookup={ relation=2, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4, #5) } - delta_stage[0] - closure - project=(#0, #3, #4, #0..=#2) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=1, key=[#0] } - plan_path[2] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - 
source={ relation=2, key=[#0] } - plan_path[3] - delta_stage[2] - closure - project=(#1, #4, #7, #2, #3, #5, #6) - lookup={ relation=2, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=3, key=[#0] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#1{b})) + distinct_aggrs[0]=(0, 0, count(distinct #1{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] monotonic must_consolidate - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#1{b})) - distinct_aggrs[0]=(0, 0, count(distinct #1{b})) - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "1"), ",")))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || "1"), ","))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "2"), ",")))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || "2"), ","))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] + basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "1"), ",")))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || "2"), ",")))) + val_plan + project=(#1, #3, #1, #1, #1, #4) + map=(integer_to_text(#1{b}), row(row((#2 || "1"), ",")), row(row((#2 || "2"), ","))) + key_plan + project=(#0) + input_key=#0{a} + Get::PassArrangements materialize.public.t + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Used Indexes: - materialize.public.t_a_idx (*** full scan ***) @@ -1061,120 +851,47 @@ MATERIALIZED VIEW collated_global_mv materialize.public.collated_global_mv: With cte l0 = - Get::Arrangement materialize.public.t - project=(#1) - key=#0{a} - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- cte l1 = - Join::Delta - plan_path[0] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=0, key=[] } - plan_path[1] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - closure - project=(#2, #3, #0, #1) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=1, key=[] } - plan_path[2] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=2, key=[] } - plan_path[3] - delta_stage[2] - closure - project=(#0..=#3, #5, #4) - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=3, key=[] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#0{b})) + distinct_aggrs[0]=(0, 0, count(distinct #0{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] buckets=[268435456, 16777216, 1048576, 65536, 4096, 256, 16] - val_plan - project=(#0, #0) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#0{b})) - distinct_aggrs[0]=(0, 0, count(distinct 
#0{b})) - val_plan - project=(#0, #0) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "1"), ",")))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || "1"), ","))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "2"), ",")))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || "2"), ","))) - key_plan - project=() - Get::PassArrangements l0 - raw=true + basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "1"), ",")))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "2"), ",")))) + val_plan + project=(#0, #2, #0, #0, #0, #3) + map=(integer_to_text(#0{b}), row(row((#1 || "1"), ",")), row(row((#1 || "2"), ","))) + key_plan + project=() + Get::Arrangement materialize.public.t + project=(#1) + key=#0{a} + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Return Union - Get::Collection l1 - project=(#2, #4, #0, #1, #3, #5) + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Mfp project=(#0..=#5) map=(0, null, null, null, null, null) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Constant - () @@ -1193,121 +910,48 @@ SELECT * FROM collated_global Explained Query: With cte l0 = - Get::Arrangement materialize.public.t - project=(#1) - key=#0{a} - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- cte l1 = - Join::Delta - plan_path[0] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=0, key=[] } - plan_path[1] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - closure - project=(#2, #3, #0, #1) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=1, key=[] } - plan_path[2] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=2, key=[] } - plan_path[3] - delta_stage[2] - closure - project=(#0..=#3, #5, #4) - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=3, key=[] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#0{b})) + distinct_aggrs[0]=(0, 0, count(distinct #0{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] monotonic must_consolidate - val_plan - project=(#0, #0) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#0{b})) - distinct_aggrs[0]=(0, 0, count(distinct #0{b})) - val_plan - project=(#0, #0) - 
key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "1"), ",")))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || "1"), ","))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "2"), ",")))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || "2"), ","))) - key_plan - project=() - Get::PassArrangements l0 - raw=true + basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "1"), ",")))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || "2"), ",")))) + val_plan + project=(#0, #2, #0, #0, #0, #3) + map=(integer_to_text(#0{b}), row(row((#1 || "1"), ",")), row(row((#1 || "2"), ","))) + key_plan + project=() + Get::Arrangement materialize.public.t + project=(#1) + key=#0{a} + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] 
Return Union - Get::Collection l1 - project=(#2, #4, #0, #1, #3, #5) + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Mfp project=(#0..=#5) map=(0, null, null, null, null, null) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Constant - () diff --git a/test/sqllogictest/explain/physical_plan_as_text_redacted.slt b/test/sqllogictest/explain/physical_plan_as_text_redacted.slt index f3b56ebcf5e90..87095a5ab7d1b 100644 --- a/test/sqllogictest/explain/physical_plan_as_text_redacted.slt +++ b/test/sqllogictest/explain/physical_plan_as_text_redacted.slt @@ -681,35 +681,19 @@ FROM t GROUP BY a ---- Explained Query: - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=0, key=[#0] } - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || █), █))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || █), █))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
+ Reduce::Basic + aggrs[0]=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) + aggrs[1]=(1, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) + val_plan + project=(#3, #4) + map=(integer_to_text(#1{b}), row(row((#2 || █), █)), row(row((#2 || █), █))) + key_plan + project=(#0) + input_key=#0{a} + Get::PassArrangements materialize.public.t + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Used Indexes: - materialize.public.t_a_idx (*** full scan ***) @@ -729,48 +713,38 @@ FROM t Explained Query: With cte l0 = - Get::Arrangement materialize.public.t - project=(#1) - key=#0{a} - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - cte l1 = - Join::Linear - linear_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=0, key=[] } - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || █), █))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || █), █))) - key_plan - project=() - Get::PassArrangements l0 - raw=true + Reduce::Basic + aggrs[0]=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) + aggrs[1]=(1, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) + val_plan + project=(#2, #3) + map=(integer_to_text(#0{b}), row(row((#1 || █), █)), row(row((#1 || █), █))) + key_plan + project=() + Get::Arrangement materialize.public.t + project=(#1) + key=#0{a} + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] 
Return Union - Get::PassArrangements l1 + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0, #1) } Mfp project=(#0, #1) map=(█, █) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0, #1) } Constant - () @@ -787,120 +761,28 @@ EXPLAIN PHYSICAL PLAN WITH(redacted) AS VERBOSE TEXT FOR MATERIALIZED VIEW collated_group_by_mv ---- materialize.public.collated_group_by_mv: - Join::Delta - plan_path[0] - delta_stage[2] - closure - project=(#0, #3, #5, #1, #2, #4, #6) - lookup={ relation=3, key=[#0] } - stream={ key=[#0], thinning=(#1..=#5) } - delta_stage[1] - lookup={ relation=2, key=[#0] } - stream={ key=[#0], thinning=(#1..=#4) } - delta_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=0, key=[#0] } - plan_path[1] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4..=#6) } - delta_stage[1] - closure - project=(#1..=#3, #0, #4..=#6) - lookup={ relation=2, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4, #5) } - delta_stage[0] - closure - project=(#0, #3, #4, #0..=#2) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=1, key=[#0] } - plan_path[2] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=2, key=[#0] } - plan_path[3] - delta_stage[2] - closure - project=(#1, #4, #7, #2, #3, #5, #6) - 
lookup={ relation=2, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=3, key=[#0] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#1{b})) + distinct_aggrs[0]=(0, 0, count(distinct #1{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] buckets=[268435456, 16777216, 1048576, 65536, 4096, 256, 16] - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#1{b})) - distinct_aggrs[0]=(0, 0, count(distinct #1{b})) - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || █), █))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || █), █))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
+ basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) + val_plan + project=(#1, #3, #1, #1, #1, #4) + map=(integer_to_text(#1{b}), row(row((#2 || █), █)), row(row((#2 || █), █))) + key_plan + project=(#0) + input_key=#0{a} + Get::PassArrangements materialize.public.t + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Used Indexes: - materialize.public.t_a_idx (*** full scan ***) @@ -915,121 +797,29 @@ EXPLAIN PHYSICAL PLAN WITH(redacted) AS VERBOSE TEXT FOR SELECT * FROM collated_group_by ---- Explained Query: - Join::Delta - plan_path[0] - delta_stage[2] - closure - project=(#0, #3, #5, #1, #2, #4, #6) - lookup={ relation=3, key=[#0] } - stream={ key=[#0], thinning=(#1..=#5) } - delta_stage[1] - lookup={ relation=2, key=[#0] } - stream={ key=[#0], thinning=(#1..=#4) } - delta_stage[0] - lookup={ relation=1, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=0, key=[#0] } - plan_path[1] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4..=#6) } - delta_stage[1] - closure - project=(#1..=#3, #0, #4..=#6) - lookup={ relation=2, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4, #5) } - delta_stage[0] - closure - project=(#0, #3, #4, #0..=#2) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1, #2) } - source={ relation=1, key=[#0] } - plan_path[2] - delta_stage[2] - closure - project=(#1, #4, #6, #2, #3, #5, #7) - lookup={ relation=3, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - 
source={ relation=2, key=[#0] } - plan_path[3] - delta_stage[2] - closure - project=(#1, #4, #7, #2, #3, #5, #6) - lookup={ relation=2, key=[#0] } - stream={ key=[#5], thinning=(#0..=#4, #6) } - delta_stage[1] - closure - project=(#1..=#3, #5, #6, #0, #4) - lookup={ relation=1, key=[#0] } - stream={ key=[#3], thinning=(#0..=#2, #4) } - delta_stage[0] - closure - project=(#0, #2, #3, #0, #1) - lookup={ relation=0, key=[#0] } - stream={ key=[#0], thinning=(#1) } - source={ relation=3, key=[#0] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#1{b})) + distinct_aggrs[0]=(0, 0, count(distinct #1{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] monotonic must_consolidate - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#1{b})) - distinct_aggrs[0]=(0, 0, count(distinct #1{b})) - val_plan - project=(#1, #1) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || █), █))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) - val_plan - project=(#2) - map=(row(row((integer_to_text(#1{b}) || █), █))) - key_plan - project=(#0) - input_key=#0{a} - Get::PassArrangements materialize.public.t - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] + basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#1{b}) || █), █)))) + val_plan + project=(#1, #3, #1, #1, #1, #4) + map=(integer_to_text(#1{b}), row(row((#2 || █), █)), row(row((#2 || █), █))) + key_plan + project=(#0) + input_key=#0{a} + Get::PassArrangements materialize.public.t + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Used Indexes: - materialize.public.t_a_idx (*** full scan ***) @@ -1046,120 +836,47 @@ MATERIALIZED VIEW collated_global_mv materialize.public.collated_global_mv: With cte l0 = - Get::Arrangement materialize.public.t - project=(#1) - key=#0{a} - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- cte l1 = - Join::Delta - plan_path[0] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=0, key=[] } - plan_path[1] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - closure - project=(#2, #3, #0, #1) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=1, key=[] } - plan_path[2] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=2, key=[] } - plan_path[3] - delta_stage[2] - closure - project=(#0..=#3, #5, #4) - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=3, key=[] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#0{b})) + distinct_aggrs[0]=(0, 0, count(distinct #0{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] buckets=[268435456, 16777216, 1048576, 65536, 4096, 256, 16] - val_plan - project=(#0, #0) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#0{b})) - distinct_aggrs[0]=(0, 0, count(distinct 
#0{b})) - val_plan - project=(#0, #0) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || █), █))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || █), █))) - key_plan - project=() - Get::PassArrangements l0 - raw=true + basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) + val_plan + project=(#0, #2, #0, #0, #0, #3) + map=(integer_to_text(#0{b}), row(row((#1 || █), █)), row(row((#1 || █), █))) + key_plan + project=() + Get::Arrangement materialize.public.t + project=(#1) + key=#0{a} + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] Return Union - Get::Collection l1 - project=(#2, #4, #0, #1, #3, #5) + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Mfp project=(#0..=#5) map=(█, █, █, █, █, █) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Constant - () @@ -1178,121 +895,48 @@ SELECT * FROM collated_global Explained Query: With cte l0 = - Get::Arrangement materialize.public.t - project=(#1) - key=#0{a} - raw=false - arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } - types=[integer?, integer?] 
- cte l1 = - Join::Delta - plan_path[0] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=0, key=[] } - plan_path[1] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#3) } - delta_stage[0] - closure - project=(#2, #3, #0, #1) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0, #1) } - source={ relation=1, key=[] } - plan_path[2] - delta_stage[2] - lookup={ relation=3, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=2, key=[] } - plan_path[3] - delta_stage[2] - closure - project=(#0..=#3, #5, #4) - lookup={ relation=2, key=[] } - stream={ key=[], thinning=(#0..=#4) } - delta_stage[1] - closure - project=(#0, #1, #3, #4, #2) - lookup={ relation=1, key=[] } - stream={ key=[], thinning=(#0..=#2) } - delta_stage[0] - closure - project=(#1, #2, #0) - lookup={ relation=0, key=[] } - stream={ key=[], thinning=(#0) } - source={ relation=3, key=[] } - Reduce::Hierarchical + Reduce::Collation + aggregate_types=[a, b, h, h, a, b] + accumulable + simple_aggrs[0]=(1, 4, sum(#0{b})) + distinct_aggrs[0]=(0, 0, count(distinct #0{b})) + hierarchical aggr_funcs=[min, max] - skips=[0, 0] + skips=[2, 0] monotonic must_consolidate - val_plan - project=(#0, #0) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Accumulable - simple_aggrs[0]=(1, 1, sum(#0{b})) - distinct_aggrs[0]=(0, 0, count(distinct #0{b})) - val_plan - project=(#0, #0) - 
key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || █), █))) - key_plan - project=() - Get::PassArrangements l0 - raw=true - Reduce::Basic - aggr=(0, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) - val_plan - project=(#1) - map=(row(row((integer_to_text(#0{b}) || █), █))) - key_plan - project=() - Get::PassArrangements l0 - raw=true + basic + aggrs[0]=(1, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) + aggrs[1]=(5, string_agg[order_by=[]](row(row((integer_to_text(#0{b}) || █), █)))) + val_plan + project=(#0, #2, #0, #0, #0, #3) + map=(integer_to_text(#0{b}), row(row((#1 || █), █)), row(row((#1 || █), █))) + key_plan + project=() + Get::Arrangement materialize.public.t + project=(#1) + key=#0{a} + raw=false + arrangements[0]={ key=[#0{a}], permutation=id, thinning=(#1) } + types=[integer?, integer?] 
Return Union - Get::Collection l1 - project=(#2, #4, #0, #1, #3, #5) + ArrangeBy + input_key=[] raw=true + Get::PassArrangements l0 + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Mfp project=(#0..=#5) map=(█, █, █, █, █, █) Union consolidate_output=true Negate - Get::Collection l1 + Get::Arrangement l0 project=() - raw=true + key= + raw=false + arrangements[0]={ key=[], permutation=id, thinning=(#0..=#5) } Constant - () diff --git a/test/sqllogictest/extract.slt b/test/sqllogictest/extract.slt index c816eb5ddce2e..6a6479f710f76 100644 --- a/test/sqllogictest/extract.slt +++ b/test/sqllogictest/extract.slt @@ -27,6 +27,16 @@ asdf asdf NULL asdfjkl asdf NULL jkl NULL jkl +query TTTI colnames +SELECT data.*, reg.* +FROM data, regexp_extract('(asdf)|(?Pjkl)', data.input) reg WITH ORDINALITY +ORDER BY data.input +---- +input column1 foo ordinality +asdf asdf NULL 1 +asdfjkl asdf NULL 1 +jkl NULL jkl 1 + # TODO(brennan): test that the regex columns have the correct nullability, once # they actually do (database-issues#612). diff --git a/test/sqllogictest/func_aliases.slt b/test/sqllogictest/func_aliases.slt new file mode 100644 index 0000000000000..dea00c6584713 --- /dev/null +++ b/test/sqllogictest/func_aliases.slt @@ -0,0 +1,33 @@ +# Copyright Materialize, Inc. and contributors. All rights reserved. +# +# Use of this software is governed by the Business Source License +# included in the LICENSE file at the root of this repository. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0. 
+ +query T +SELECT current_user = current_user() +---- +true + +query T +SELECT current_role = current_role() +---- +true + +query T +SELECT current_schema = current_schema() +---- +true + +query T +SELECT current_catalog = current_catalog() +---- +true + +query T +SELECT session_user = session_user() +---- +true diff --git a/test/sqllogictest/mz_catalog_server_index_accounting.slt b/test/sqllogictest/mz_catalog_server_index_accounting.slt index 397a6b8a6e551..ad6c943d8a371 100644 --- a/test/sqllogictest/mz_catalog_server_index_accounting.slt +++ b/test/sqllogictest/mz_catalog_server_index_accounting.slt @@ -851,6 +851,7 @@ pg_class_all_databases relispartition pg_class_all_databases relkind pg_class_all_databases relname pg_class_all_databases relnamespace +pg_class_all_databases relnatts pg_class_all_databases reloftype pg_class_all_databases reloptions pg_class_all_databases relowner diff --git a/test/sqllogictest/not-null-propagation.slt b/test/sqllogictest/not-null-propagation.slt index e8fc2d029a5e3..7c93298b61d60 100644 --- a/test/sqllogictest/not-null-propagation.slt +++ b/test/sqllogictest/not-null-propagation.slt @@ -319,29 +319,19 @@ EXPLAIN OPTIMIZED PLAN WITH(types, no fast path, humanized expressions) AS VERBO Explained Query: With cte l0 = - Project (#1{col_not_null}) // { types: "(integer)" } - ReadStorage materialize.public.int_table // { types: "(integer?, integer)" } - cte l1 = - CrossJoin type=delta // { types: "(integer, integer, bigint, bigint, numeric, numeric, bigint, integer list)" } - ArrangeBy keys=[[]] // { types: "(integer, integer)" } - Reduce aggregates=[min(#0{col_not_null}), max(#0{col_not_null})] // { types: "(integer, integer)" } - Get l0 // { types: "(integer)" } - ArrangeBy keys=[[]] // { types: "(bigint, bigint, numeric, numeric, bigint)" } - Reduce aggregates=[sum(#0{col_not_null}), count(*), sum((integer_to_numeric(#0{col_not_null}) * integer_to_numeric(#0{col_not_null}))), sum(integer_to_numeric(#0{col_not_null})), 
count(integer_to_numeric(#0{col_not_null}))] // { types: "(bigint, bigint, numeric, numeric, bigint)" } - Get l0 // { types: "(integer)" } - ArrangeBy keys=[[]] // { types: "(integer list)" } - Reduce aggregates=[list_agg[order_by=[]](row(list[#0{col_not_null}]))] // { types: "(integer list)" } - Get l0 // { types: "(integer)" } + Reduce aggregates=[min(#0{col_not_null}), max(#0{col_not_null}), sum(#0{col_not_null}), count(*), sum((integer_to_numeric(#0{col_not_null}) * integer_to_numeric(#0{col_not_null}))), sum(integer_to_numeric(#0{col_not_null})), count(integer_to_numeric(#0{col_not_null})), list_agg[order_by=[]](row(list[#0{col_not_null}]))] // { types: "(integer, integer, bigint, bigint, numeric, numeric, bigint, integer list)" } + Project (#1{col_not_null}) // { types: "(integer)" } + ReadStorage materialize.public.int_table // { types: "(integer?, integer)" } Return // { types: "(integer?, integer?, numeric?, numeric?, integer list?)" } Project (#0{min_col_not_null}, #1{max_col_not_null}, #8, #9, #7{list_agg}) // { types: "(integer?, integer?, numeric?, numeric?, integer list?)" } Map ((bigint_to_numeric(#2{sum_col_not_null}) / bigint_to_numeric(case when (#3{count} = 0) then null else #3{count} end)), sqrtnumeric(case when ((#4{sum}) IS NULL OR (#5{sum}) IS NULL OR (case when (#6{count} = 0) then null else #6{count} end) IS NULL OR (case when (0 = (#6{count} - 1)) then null else (#6{count} - 1) end) IS NULL) then null else greatest(((#4{sum} - ((#5{sum} * #5{sum}) / bigint_to_numeric(case when (#6{count} = 0) then null else #6{count} end))) / bigint_to_numeric(case when (0 = (#6{count} - 1)) then null else (#6{count} - 1) end)), 0) end)) // { types: "(integer?, integer?, bigint?, bigint, numeric?, numeric?, bigint, integer list?, numeric?, numeric?)" } Union // { types: "(integer?, integer?, bigint?, bigint, numeric?, numeric?, bigint, integer list?)" } - Get l1 // { types: "(integer, integer, bigint, bigint, numeric, numeric, bigint, integer list)" } + 
Get l0 // { types: "(integer, integer, bigint, bigint, numeric, numeric, bigint, integer list)" } Map (null, null, null, 0, null, null, 0, null) // { types: "(integer?, integer?, bigint?, bigint, numeric?, numeric?, bigint, integer list?)" } Union // { types: "()" } Negate // { types: "()" } Project () // { types: "()" } - Get l1 // { types: "(integer, integer, bigint, bigint, numeric, numeric, bigint, integer list)" } + Get l0 // { types: "(integer, integer, bigint, bigint, numeric, numeric, bigint, integer list)" } Constant // { types: "()" } - () diff --git a/test/sqllogictest/pg_catalog_class.slt b/test/sqllogictest/pg_catalog_class.slt index 74522c5342a4e..7901c78d1c4c2 100644 --- a/test/sqllogictest/pg_catalog_class.slt +++ b/test/sqllogictest/pg_catalog_class.slt @@ -30,6 +30,12 @@ WHERE relname = 'a'; ---- a 0 0 0 0 false p r 0 false false false false d false false -1 +# Test that pg_class reports the correct number of columns of a relation +query I +SELECT relnatts FROM pg_catalog.pg_class where relname = 'a' +---- +1 + statement ok CREATE DEFAULT INDEX ON a diff --git a/test/sqllogictest/pg_catalog_index.slt b/test/sqllogictest/pg_catalog_index.slt new file mode 100644 index 0000000000000..a93dbbeddf3c8 --- /dev/null +++ b/test/sqllogictest/pg_catalog_index.slt @@ -0,0 +1,24 @@ +# Copyright Materialize, Inc. and contributors. All rights reserved. +# +# Use of this software is governed by the Business Source License +# included in the LICENSE file at the root of this repository. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0. 
+ +mode cockroach + +statement ok +CREATE TABLE a (b int, c int); + +statement ok +CREATE INDEX a_index ON a (b); + +# Test that pg_class reports the correct number of columns of an index's relation +query I +SELECT indnatts FROM pg_catalog.pg_index +JOIN mz_catalog.mz_relations ON pg_catalog.pg_index.indrelid = mz_catalog.mz_relations.oid +WHERE mz_catalog.mz_relations.name = 'a' +---- +2 diff --git a/test/sqllogictest/role.slt b/test/sqllogictest/role.slt index 74a7569837643..455f29b833fa8 100644 --- a/test/sqllogictest/role.slt +++ b/test/sqllogictest/role.slt @@ -275,3 +275,234 @@ query TT SHOW ROLES LIKE 'f%' ---- foo (empty) + +# Enable password authentication in the system. +simple conn=mz_system,user=mz_system +ALTER SYSTEM SET enable_password_auth = true; +---- +COMPLETE 0 + +# Test creating and using a role with password +statement ok +CREATE ROLE password_user WITH LOGIN PASSWORD 'test_password123' + +# Grant necessary permissions to the new role (using mz_system which has appropriate privileges) +simple conn=mz_system,user=mz_system +GRANT USAGE ON DATABASE materialize TO password_user +---- +COMPLETE 0 + +simple conn=mz_system,user=mz_system +GRANT CREATE ON SCHEMA materialize.public TO password_user +---- +COMPLETE 0 + +# Test connecting with the password +simple conn=password_conn,user=password_user,password=test_password123 +SELECT current_user(); +---- +password_user +COMPLETE 1 + +# Test that connection fails with wrong password (should error) +simple conn=bad_password_conn,user=password_user,password=wrong_password +SELECT current_user(); +---- +db error: FATAL: invalid password + +# Test that connection succeeds even without password when using the no-auth listener +# This is expected behavior - the external listener doesn't require authentication +simple conn=no_password_conn,user=password_user +SELECT current_user(); +---- +password_user +COMPLETE 1 + +# Clean up +# First revoke the privileges that were granted to password_user +simple 
conn=mz_system,user=mz_system +REVOKE USAGE ON DATABASE materialize FROM password_user +---- +COMPLETE 0 + +simple conn=mz_system,user=mz_system +REVOKE CREATE ON SCHEMA materialize.public FROM password_user +---- +COMPLETE 0 + +# Now drop the role using the default connection +simple +DROP ROLE password_user +---- +COMPLETE 0 + +# Test role inheritance with password authentication +# Create a base role to grant privileges +statement ok +CREATE ROLE base_role + +# Create a user with LOGIN and PASSWORD +statement ok +CREATE ROLE test_user WITH LOGIN PASSWORD 'secure_password456' + +# Grant the base role to the user +statement ok +GRANT base_role TO test_user + +# Grant necessary permissions for the user to connect +simple conn=mz_system,user=mz_system +GRANT USAGE ON DATABASE materialize TO test_user +---- +COMPLETE 0 + +# Test that the user can login with password +simple conn=test_user_conn,user=test_user,password=secure_password456 +SELECT current_user() +---- +test_user +COMPLETE 1 + +# Create another role +statement ok +CREATE ROLE additional_role + +# Grant the additional role to the user +statement ok +GRANT additional_role TO test_user + +# Verify the user can still login after being granted another role +simple conn=test_user_conn2,user=test_user,password=secure_password456 +SELECT current_user() +---- +test_user +COMPLETE 1 + +# Clean up - revoke privileges and drop roles +simple conn=mz_system,user=mz_system +REVOKE USAGE ON DATABASE materialize FROM test_user +---- +COMPLETE 0 + +statement ok +REVOKE base_role FROM test_user + +statement ok +REVOKE additional_role FROM test_user + +statement ok +DROP ROLE test_user + +statement ok +DROP ROLE base_role + +statement ok +DROP ROLE additional_role + +# Test removing password from a role +# Create a role with a password +statement ok +CREATE ROLE password_removal_test WITH LOGIN PASSWORD 'initial_password789' + +# Grant necessary permissions for connection +simple conn=mz_system,user=mz_system +GRANT USAGE ON 
DATABASE materialize TO password_removal_test +---- +COMPLETE 0 + +# Verify login works with password +simple conn=pwd_test_conn,user=password_removal_test,password=initial_password789 +SELECT current_user() +---- +password_removal_test +COMPLETE 1 + +# Remove the password from the role +statement ok +ALTER ROLE password_removal_test PASSWORD NULL + +# Test that connection with the old password now fails +simple conn=pwd_test_conn2,user=password_removal_test,password=initial_password789 +SELECT current_user() +---- +db error: FATAL: invalid password + +# Test that connection without password still works on no-auth listener +simple conn=pwd_test_noauth,user=password_removal_test +SELECT current_user() +---- +password_removal_test +COMPLETE 1 + +# Clean up +simple conn=mz_system,user=mz_system +REVOKE USAGE ON DATABASE materialize FROM password_removal_test +---- +COMPLETE 0 + +statement ok +DROP ROLE password_removal_test + +# Test that granting a role with a password to a role without a password +# only inherits privileges, not the password +statement ok +CREATE ROLE role_with_password WITH LOGIN PASSWORD 'secure_password123' + +statement ok +CREATE ROLE role_without_password + +# Grant necessary permissions to the role with password +simple conn=mz_system,user=mz_system +GRANT USAGE ON DATABASE materialize TO role_with_password +---- +COMPLETE 0 + +# Grant the role with password to the role without password +statement ok +GRANT role_with_password TO role_without_password + +# Grant USAGE permission directly to role_without_password so it can connect +simple conn=mz_system,user=mz_system +GRANT USAGE ON DATABASE materialize TO role_without_password +---- +COMPLETE 0 + +# Verify that role_without_password can connect without a password +simple conn=no_pwd_inherit,user=role_without_password +SELECT current_user() +---- +role_without_password +COMPLETE 1 + +# Verify that trying to connect to role_without_password with the password +# from role_with_password fails 
(password is not inherited) +simple conn=pwd_inherit_test,user=role_without_password,password=secure_password123 +SELECT current_user() +---- +db error: FATAL: invalid password + +# Verify that role_with_password still requires its password +simple conn=with_pwd_test,user=role_with_password,password=secure_password123 +SELECT current_user() +---- +role_with_password +COMPLETE 1 + +# Clean up +simple conn=mz_system,user=mz_system +REVOKE USAGE ON DATABASE materialize FROM role_with_password +---- +COMPLETE 0 + +simple conn=mz_system,user=mz_system +REVOKE USAGE ON DATABASE materialize FROM role_without_password +---- +COMPLETE 0 + +statement ok +REVOKE role_with_password FROM role_without_password + +statement ok +DROP ROLE role_without_password + +statement ok +DROP ROLE role_with_password diff --git a/test/sqllogictest/sqlite b/test/sqllogictest/sqlite index 262d86a2a2073..088516bab5084 160000 --- a/test/sqllogictest/sqlite +++ b/test/sqllogictest/sqlite @@ -1 +1 @@ -Subproject commit 262d86a2a2073c7a707beaa34be283d3b0ead301 +Subproject commit 088516bab508499a2e2981f235890313af03ecfb diff --git a/test/sqllogictest/system-cluster.slt b/test/sqllogictest/system-cluster.slt index da211e5c0dbe4..f9a40d3775359 100644 --- a/test/sqllogictest/system-cluster.slt +++ b/test/sqllogictest/system-cluster.slt @@ -494,22 +494,12 @@ LIMIT 10; ---- Explained Query: Finish order_by=[#0{max} desc nulls_first] limit=10 output=[#0..=#2] - With - cte l0 = + Project (#1{max}, #0{error}, #2{count}) + Reduce group_by=[#1{error}] aggregates=[max((extract_epoch_tstz(#0{occurred_at}) * 1000)), count(*)] Project (#0{occurred_at}, #3{error}) Filter (#7 <= 100) AND (#7 >= 0) AND (#3{error}) IS NOT NULL Map (timestamp_tz_to_mz_timestamp(#0{occurred_at})) ReadIndex on=mz_internal.mz_source_status_history mz_source_status_history_ind=[lookup value=("u6")] - Return - Project (#1{max}, #0{error}, #3{count}) - Join on=(#0{error} = #2{error}) type=differential - ArrangeBy keys=[[#0{error}]] - Reduce 
group_by=[#1{error}] aggregates=[max((extract_epoch_tstz(#0{occurred_at}) * 1000))] - Get l0 - ArrangeBy keys=[[#0{error}]] - Reduce group_by=[#0{error}] aggregates=[count(*)] - Project (#1{error}) - Get l0 Used Indexes: - mz_internal.mz_source_status_history_ind (lookup) diff --git a/test/sqllogictest/table_func.slt b/test/sqllogictest/table_func.slt index 0f2425050718b..159deddbec54d 100644 --- a/test/sqllogictest/table_func.slt +++ b/test/sqllogictest/table_func.slt @@ -27,12 +27,23 @@ ALTER SYSTEM SET enable_repeat_row = true COMPLETE 0 statement ok -CREATE TABLE y (a JSONB) +CREATE TABLE y (a JSONB); + +statement ok +INSERT INTO y VALUES ('[-1, 27]'); # Ensure this does not panic. -query TTTT -SELECT * FROM y a, y b, jsonb_each(a.a); +query TTT +SELECT * FROM y a, y b, jsonb_array_elements(a.a); ---- +[-1,27] [-1,27] -1 +[-1,27] [-1,27] 27 + +query TTTI +SELECT * FROM y a, y b, jsonb_array_elements(a.a) WITH ORDINALITY; +---- +[-1,27] [-1,27] -1 1 +[-1,27] [-1,27] 27 2 query I rowsort SELECT generate_series FROM generate_series(1, 3) @@ -56,6 +67,85 @@ generate_series ordinality 3 2 4 3 +query TI colnames,rowsort +SELECT * +FROM unnest(ARRAY['a','b','c']) WITH ORDINALITY AS t(letter, position); +---- +letter position +a 1 +b 2 +c 3 + +# Weird order; supported only for backcompat reasons +query TI colnames,rowsort +SELECT * +FROM unnest(ARRAY['a','b','c']) AS t(letter, position) WITH ORDINALITY; +---- +letter position +a 1 +b 2 +c 3 + +query T colnames,rowsort +SELECT * +FROM unnest(ARRAY['a','b','c']) AS t(letter); +---- +letter +a +b +c + +query II colnames,rowsort +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) +---- +generate_series generate_series +1 3 +2 4 +NULL 5 + +query III colnames,rowsort +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY +---- +generate_series generate_series ordinality +1 3 1 +2 4 2 +NULL 5 3 + +query II colnames,rowsort +SELECT * FROM ROWS FROM (generate_series(1, 2), 
generate_series(3, 5)) AS t (a) +---- +a generate_series +1 3 +2 4 +NULL 5 + +query II colnames,rowsort +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) AS t (a, b) +---- +a b +1 3 +2 4 +NULL 5 + +query III colnames,rowsort +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS t (a, b) +---- +a b ordinality +1 3 1 +2 4 2 +NULL 5 3 + +query III colnames,rowsort +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS t (a, b, c) +---- +a b c +1 3 1 +2 4 2 +NULL 5 3 + +query error db error: ERROR: t has 3 columns available but 4 columns specified +SELECT * FROM ROWS FROM (generate_series(1, 2), generate_series(3, 5)) WITH ORDINALITY AS t (a, b, c, d) + query I rowsort SELECT generate_series FROM generate_series(-2, 2) ---- @@ -187,6 +277,28 @@ SELECT x.a, generate_series FROM x, LATERAL (SELECT * FROM generate_series(1, x. 3 2 3 3 +query III +SELECT x.a, generate_series, ordinality +FROM x, LATERAL (SELECT * FROM generate_series(1, x.a) WITH ORDINALITY) +---- +1 1 1 +2 1 1 +2 2 2 +3 1 1 +3 2 2 +3 3 3 + +query III +SELECT x.a, g, o +FROM x, LATERAL (SELECT * FROM generate_series(1, x.a) WITH ORDINALITY AS t(g, o)) +---- +1 1 1 +2 1 1 +2 2 2 +3 1 1 +3 2 2 +3 3 3 + # Regression test for database-issues#1700: crash when a filter references an output column of # a table function query IIIII rowsort @@ -350,6 +462,24 @@ SELECT * FROM generate_series(0,3), repeat_row(generate_series); 3 3 +query II +SELECT * FROM generate_series(0,3), repeat_row(generate_series) WITH ORDINALITY; +---- +1 1 +2 1 +2 2 +3 1 +3 2 +3 3 + +query II +SELECT * FROM generate_series(0,3), repeat_row(abs(generate_series - 1)) WITH ORDINALITY; +---- +0 1 +2 1 +3 1 +3 2 + query I SELECT abs(generate_series) FROM generate_series(-1, 2), repeat_row(generate_series); ---- @@ -1095,6 +1225,13 @@ NULL NULL NULL 10 NULL NULL NULL 11 NULL NULL NULL 12 
+######################################################################################################################## +# All these tests that involve `_pg_expandarray` and `ROWS FROM` currently emit +# ERROR mz_sql::plan::query: Using the legacy WITH ORDINALITY / ROWS FROM implementation for table function information_schema._pg_expandarray +# We are hoping that this is a rare combination in practice, but sooner or later we'll probably need to do something +# about this. For further details, see comment in `plan_table_function_internal`. +######################################################################################################################## + query IIII colnames SELECT * FROM ROWS FROM (generate_series(1, 2), information_schema._pg_expandarray(array[9]), generate_series(3, 6)) ORDER BY 4; ---- @@ -1194,6 +1331,17 @@ SELECT * FROM LATERAL ROWS FROM (generate_series(1,1), information_schema._pg_ex a b c 1 1 1 +# ROWS FROM together with WITH ORDINALITY explicitly specified +query III colnames +SELECT * +FROM ROWS FROM (generate_series(6, 7), generate_series(10, 12)) WITH ORDINALITY +ORDER BY ordinality; +---- +generate_series generate_series ordinality +6 10 1 +7 11 2 +NULL 12 3 + # Multiple table funcs in SELECT projection. 
query II colnames rowsort @@ -1407,6 +1555,13 @@ SELECT * FROM generate_series(1, 3) as foo(a), ROWS FROM (generate_series(foo.a, statement ok create view bar as select * from y, rows from (generate_series(1, 2), jsonb_array_elements(y.a)); +query TIT +SELECT * +FROM bar +---- +[-1,27] 1 -1 +[-1,27] 2 27 + # Regression for database-issues#3078 query IT WITH a(x) AS (SELECT 'a') SELECT generate_series(1, 2), * FROM a ORDER BY generate_series @@ -1450,10 +1605,9 @@ Project (#5, #6) Project (#0, #1, #3..=#5) Map (coalesce(#2, #4)) FullOuterJoin (#2 = #4) - Map (row_number() over (order by []), #1) - CallTable jsonb_object_keys(text_to_jsonb("{\"1\":2}")) - Map (row_number() over (order by [])) - CallTable jsonb_object_keys(text_to_jsonb("{\"3\":4}")) + Map (#1) + CallTable jsonb_object_keys[with_ordinality](text_to_jsonb("{\"1\":2}")) + CallTable jsonb_object_keys[with_ordinality](text_to_jsonb("{\"3\":4}")) Target cluster: quickstart @@ -1476,3 +1630,203 @@ SELECT generate_series FROM generate_series(DISTINCT 1, 3); # Regression for https://github.com/MaterializeInc/database-issues/issues/6180 query error db error: ERROR: regexp_extract must specify at least one capture group select regexp_extract('aaa', 'a') + +# Test `FlatMapElimination` +query T multiline +EXPLAIN OPTIMIZED PLAN AS TEXT FOR +SELECT unnest(LIST[5]) from x; +---- +Explained Query: + Project (#2) + Map (5) + ReadStorage materialize.public.x + +Source materialize.public.x + +Target cluster: quickstart + +EOF + +query I +SELECT unnest(LIST[5]) from x; +---- +5 +5 +5 + +query T multiline +EXPLAIN OPTIMIZED PLAN AS TEXT FOR +SELECT * +FROM x, unnest(LIST[5]) WITH ORDINALITY; +---- +Explained Query: + Map (5, 1) + ReadStorage materialize.public.x + +Source materialize.public.x + +Target cluster: quickstart + +EOF + +query IIII +SELECT * +FROM x, unnest(LIST[5]) WITH ORDINALITY; +---- +1 2 5 1 +2 3 5 1 +3 4 5 1 + +# Regression test for the ordering issue with the old WITH ORDINALITY implementation +# 
https://github.com/MaterializeInc/database-issues/issues/4764 +query TTI +SELECT * +FROM + (VALUES (ARRAY['bb','a'])) AS v(x), + LATERAL unnest(x) WITH ORDINALITY; +---- +{bb,a} a 2 +{bb,a} bb 1 + +# This query is carefully constructed to exercise `FlatMapElimination`: +# - The CASE is there to prevent HIR lowering from seeing unnest's argument as a constant, in which case it would make +# it a cross join with a FlatMap on a constant, which would be simplified by `FoldConstants` before +# `FlatMapElimination` would have a chance to see it. +# - However, MIR optimization is able to remove the CASE, because `x.a` can never be null, as it is a primary key. +# Therefore, `unnest`'s argument becomes a constant (later than HIR lowering), on which `FlatMapElimination` can do +# its magic. +query T multiline +EXPLAIN OPTIMIZED PLAN FOR +SELECT * FROM + x, + LATERAL unnest(LIST[1 + CASE WHEN x.a IS NULL THEN x.a ELSE 0 END]) WITH ORDINALITY; +---- +Explained Query: + Map (1, 1) + ReadStorage materialize.public.x + +Source materialize.public.x + +Target cluster: quickstart + +EOF + +query T multiline +EXPLAIN OPTIMIZED PLAN WITH (NO FAST PATH) FOR +SELECT * +FROM x, generate_series(1, x.a) +WHERE x.a = 1; +---- +Explained Query: + FlatMap generate_series(1, 1, 1) + Filter (#0{a} = 1) + ReadStorage materialize.public.x + +Source materialize.public.x + filter=((#0{a} = 1)) + +Target cluster: quickstart + +EOF + +query T multiline +EXPLAIN OPTIMIZED PLAN WITH (NO FAST PATH) FOR +SELECT * +FROM x, generate_series(5, x.a) +WHERE x.a = 1; +---- +Explained Query: + FlatMap generate_series(5, 1, 1) + Filter (#0{a} = 1) + ReadStorage materialize.public.x + +Source materialize.public.x + filter=((#0{a} = 1)) + +Target cluster: quickstart + +EOF + +query T multiline +EXPLAIN OPTIMIZED PLAN WITH (NO FAST PATH) FOR +SELECT * +FROM x, LATERAL (VALUES ((1), (x.a))) +WHERE x.a = 1; +---- +Explained Query: + Project (#0, #1, #0, #0) + Filter (#0{a} = 1) + ReadStorage materialize.public.x + 
+Source materialize.public.x + +Target cluster: quickstart + +EOF + +# Regression test for https://github.com/MaterializeInc/database-issues/issues/9592: +# ROWS FROM + WITH ORDINALITY with many table functions. +query II +SELECT * FROM ROWS FROM (generate_series(1, 1)) WITH ORDINALITY AS t(g1, o); +---- +1 1 + +query III +SELECT * FROM ROWS FROM (generate_series(1, 1), generate_series(1, 2)) WITH ORDINALITY AS t(g1, g2, o); +---- +NULL 2 2 +1 1 1 + +query IIII +SELECT * FROM ROWS FROM (generate_series(1, 1), generate_series(1, 2), generate_series(1,3)) WITH ORDINALITY AS t(g1, g2, g3, o); +---- +NULL NULL 3 3 +NULL 2 2 2 +1 1 1 1 + +query IIIII +SELECT * FROM ROWS FROM (generate_series(1, 1), generate_series(1, 2), generate_series(1,3), generate_series(1,4)) WITH ORDINALITY AS t(g1, g2, g3, g4, o); +---- +NULL NULL NULL 4 4 +NULL NULL 3 3 3 +NULL 2 2 2 2 +1 1 1 1 1 + +query IIIIII +SELECT * FROM ROWS FROM (generate_series(1, 1), generate_series(1, 2), generate_series(1,3), generate_series(1,4), generate_series(1,5)) WITH ORDINALITY AS t(g1, g2, g3, g4, g5, o); +---- +NULL NULL NULL NULL 5 5 +NULL NULL NULL 4 4 4 +NULL NULL 3 3 3 3 +NULL 2 2 2 2 2 +1 1 1 1 1 1 + +query IIIIIII +SELECT * FROM ROWS FROM (generate_series(1, 1), generate_series(1, 2), generate_series(1,3), generate_series(1,4), generate_series(1,5), generate_series(1,6)) WITH ORDINALITY AS t(g1, g2, g3, g4, g5, g6, o); +---- +NULL NULL NULL NULL NULL 6 6 +NULL NULL NULL NULL 5 5 5 +NULL NULL NULL 4 4 4 4 +NULL NULL 3 3 3 3 3 +NULL 2 2 2 2 2 2 +1 1 1 1 1 1 1 + +query IIIIIII +SELECT * FROM ROWS FROM (generate_series(1,6), generate_series(1, 1), generate_series(1, 2), generate_series(1,3), generate_series(1,4), generate_series(1,5)) WITH ORDINALITY AS t(g1, g2, g3, g4, g5, g6, o); +---- +6 NULL NULL NULL NULL NULL 6 +5 NULL NULL NULL NULL 5 5 +4 NULL NULL NULL 4 4 4 +3 NULL NULL 3 3 3 3 +2 NULL 2 2 2 2 2 +1 1 1 1 1 1 1 + +query IIIIIII +SELECT * FROM ROWS FROM (generate_series(1, 1), generate_series(1, 
2), generate_series(1,6), generate_series(1,3), generate_series(1,4), generate_series(1,5)) WITH ORDINALITY AS t(g1, g2, g3, g4, g5, g6, o); +---- +NULL NULL 6 NULL NULL NULL 6 +NULL NULL 5 NULL NULL 5 5 +NULL NULL 4 NULL 4 4 4 +NULL NULL 3 3 3 3 3 +NULL 2 2 2 2 2 2 +1 1 1 1 1 1 1 diff --git a/test/sqllogictest/transform/aggregation_nullability.slt b/test/sqllogictest/transform/aggregation_nullability.slt index 2df3c42d1ec28..c637da0f8d934 100644 --- a/test/sqllogictest/transform/aggregation_nullability.slt +++ b/test/sqllogictest/transform/aggregation_nullability.slt @@ -126,7 +126,8 @@ Explained Query: Project (#0{f1}) // { arity: 1 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = + Return // { arity: 9 } + Reduce group_by=[#0{f1}] aggregates=[count(#2{f1}), sum(#2{f1}), max(#2{f1}), min(#2{f1}), count(#1{f2}), sum(#1{f2}), min(#1{f2}), max(#1{f2})] // { arity: 9 } Union // { arity: 3 } Map (null) // { arity: 3 } Union // { arity: 2 } @@ -143,17 +144,6 @@ Explained Query: ReadStorage materialize.public.t1 // { arity: 2 } Project (#0{f1}, #1{f2}, #0{f1}) // { arity: 3 } Get l1 // { arity: 2 } - Return // { arity: 9 } - Project (#0{f1}, #6{count_f1}, #7{sum_f1}, #1{max_f1}, #2{min_f1}, #8{count_f2}, #9{sum_f2}, #3{min_f2}, #4{max_f2}) // { arity: 9 } - Join on=(#0{f1} = #5{f1}) type=differential // { arity: 10 } - implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{f1}]] // { arity: 5 } - Reduce group_by=[#0{f1}] aggregates=[max(#2{f1}), min(#2{f1}), min(#1{f2}), max(#1{f2})] // { arity: 5 } - Get l2 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 5 } - Reduce group_by=[#0{f1}] aggregates=[count(#2{f1}), sum(#2{f1}), count(#1{f2}), sum(#1{f2})] // { arity: 5 } - Get l2 // { arity: 3 } Source materialize.public.t1 Source materialize.public.t2 @@ -182,35 +172,25 @@ Explained Query: Project (#0{f1}) // { arity: 1 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // 
{ arity: 2 } - cte l2 = - Union // { arity: 3 } - Map (null) // { arity: 3 } - Union // { arity: 2 } - Negate // { arity: 2 } - Project (#0{f1}, #1{f2}) // { arity: 2 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 3 } - implementation - %1[#0]UKA » %0:l0[#0{f1}]K - Get l0 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Distinct project=[#0{f1}] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Get l1 // { arity: 2 } - ReadStorage materialize.public.t1 // { arity: 2 } - Project (#0{f1}, #1{f2}, #0{f1}) // { arity: 3 } - Get l1 // { arity: 2 } Return // { arity: 9 } - Project (#0{f1}, #6{count_f1}, #7{sum_f1}, #1{max_f1}, #2{min_f1}, #8{count_f2}, #9{sum_f2}, #3{min_f2}, #4{max_f2}) // { arity: 9 } - Filter (#9{sum_f2} >= 0) // { arity: 10 } - Join on=(#0{f1} = #5{f1}) type=differential // { arity: 10 } - implementation - %1[#0]UKAif » %0[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 5 } - Reduce group_by=[#0{f1}] aggregates=[max(#2{f1}), min(#2{f1}), min(#1{f2}), max(#1{f2})] // { arity: 5 } - Get l2 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 5 } - Reduce group_by=[#0{f1}] aggregates=[count(#2{f1}), sum(#2{f1}), count(#1{f2}), sum(#1{f2})] // { arity: 5 } - Get l2 // { arity: 3 } + Filter (#6{sum_f2} >= 0) // { arity: 9 } + Reduce group_by=[#0{f1}] aggregates=[count(#2{f1}), sum(#2{f1}), max(#2{f1}), min(#2{f1}), count(#1{f2}), sum(#1{f2}), min(#1{f2}), max(#1{f2})] // { arity: 9 } + Union // { arity: 3 } + Map (null) // { arity: 3 } + Union // { arity: 2 } + Negate // { arity: 2 } + Project (#0{f1}, #1{f2}) // { arity: 2 } + Join on=(#0{f1} = #2{f1}) type=differential // { arity: 3 } + implementation + %1[#0]UKA » %0:l0[#0{f1}]K + Get l0 // { arity: 2 } + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Distinct project=[#0{f1}] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Get l1 // { arity: 2 } + ReadStorage materialize.public.t1 // { arity: 2 } + Project (#0{f1}, #1{f2}, #0{f1}) // { arity: 3 } + Get l1 // { arity: 
2 } Source materialize.public.t1 Source materialize.public.t2 @@ -239,35 +219,25 @@ Explained Query: Project (#0{f1}) // { arity: 1 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = - Union // { arity: 3 } - Map (null) // { arity: 3 } - Union // { arity: 2 } - Negate // { arity: 2 } - Project (#0{f1}, #1{f2}) // { arity: 2 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 3 } - implementation - %1[#0]UKA » %0:l0[#0{f1}]K - Get l0 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Distinct project=[#0{f1}] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Get l1 // { arity: 2 } - ReadStorage materialize.public.t1 // { arity: 2 } - Project (#0{f1}, #1{f2}, #0{f1}) // { arity: 3 } - Get l1 // { arity: 2 } Return // { arity: 9 } - Project (#0{f1}, #6{count_f1}, #7{sum_f1}, #1{max_f1}, #2{min_f1}, #8{count_f2}, #9{sum_f2}, #3{min_f2}, #4{max_f2}) // { arity: 9 } - Filter (#7{sum_f1} >= 0) // { arity: 10 } - Join on=(#0{f1} = #5{f1}) type=differential // { arity: 10 } - implementation - %1[#0]UKAif » %0[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 5 } - Reduce group_by=[#0{f1}] aggregates=[max(#2{f1}), min(#2{f1}), min(#1{f2}), max(#1{f2})] // { arity: 5 } - Get l2 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 5 } - Reduce group_by=[#0{f1}] aggregates=[count(#2{f1}), sum(#2{f1}), count(#1{f2}), sum(#1{f2})] // { arity: 5 } - Get l2 // { arity: 3 } + Filter (#2{sum_f1} >= 0) // { arity: 9 } + Reduce group_by=[#0{f1}] aggregates=[count(#2{f1}), sum(#2{f1}), max(#2{f1}), min(#2{f1}), count(#1{f2}), sum(#1{f2}), min(#1{f2}), max(#1{f2})] // { arity: 9 } + Union // { arity: 3 } + Map (null) // { arity: 3 } + Union // { arity: 2 } + Negate // { arity: 2 } + Project (#0{f1}, #1{f2}) // { arity: 2 } + Join on=(#0{f1} = #2{f1}) type=differential // { arity: 3 } + implementation + %1[#0]UKA » %0:l0[#0{f1}]K + Get l0 // { arity: 2 } + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Distinct 
project=[#0{f1}] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Get l1 // { arity: 2 } + ReadStorage materialize.public.t1 // { arity: 2 } + Project (#0{f1}, #1{f2}, #0{f1}) // { arity: 3 } + Get l1 // { arity: 2 } Source materialize.public.t1 Source materialize.public.t2 @@ -310,31 +280,19 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH(humanized expressions, arity, join implementations) AS VERBOSE TEXT FOR select t1.f1, sum(t2.f1), max(t2.f1) from t1 LEFT JOIN t2 ON t1.f1 = t2.f1 group by t1.f1 having max(t2.f1) >= 0; ---- Explained Query: - With - cte l0 = - Project (#0{f1}) // { arity: 1 } - Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } - implementation - %0:t1[#0{f1}]Kif » %1:t2[#0{f1}]Kiif - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Filter (#0{f1} >= 0) // { arity: 2 } - ReadStorage materialize.public.t1 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Filter (#0{f1} >= 0) // { arity: 2 } - ReadStorage materialize.public.t2 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #3{sum_f1}, #1{max_f1}) // { arity: 3 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } + Reduce group_by=[#0{f1}] aggregates=[sum(#0{f1}), max(#0{f1})] // { arity: 3 } + Project (#0{f1}) // { arity: 1 } + Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#0{f1})] // { arity: 2 } - Get l0 // { arity: 1 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum(#0{f1})] // { arity: 2 } - Get l0 // { arity: 1 } + %0:t1[#0{f1}]Kif » %1:t2[#0{f1}]Kiif + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Filter (#0{f1} >= 0) // { arity: 2 } + ReadStorage materialize.public.t1 // { arity: 2 } + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Filter 
(#0{f1} >= 0) // { arity: 2 } + ReadStorage materialize.public.t2 // { arity: 2 } Source materialize.public.t1 filter=((#0{f1} >= 0)) @@ -350,8 +308,8 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH(humanized expressions, arity, join implementations) AS VERBOSE TEXT FOR select t1.f1, sum(t2.f1), max(t2.f1) from t1 LEFT JOIN t2 ON t1.f1 = t2.f1 group by t1.f1 having max(t2.f1) >= 0 and sum(t2.f1) >= 0; ---- Explained Query: - With - cte l0 = + Filter (#1{sum_f1} >= 0) // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[sum(#0{f1}), max(#0{f1})] // { arity: 3 } Project (#0{f1}) // { arity: 1 } Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } implementation @@ -364,18 +322,6 @@ Explained Query: Project (#0{f1}) // { arity: 1 } Filter (#0{f1} >= 0) // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #3{sum_f1}, #1{max_f1}) // { arity: 3 } - Filter (#3{sum_f1} >= 0) // { arity: 4 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %1[#0]UKAif » %0[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#0{f1})] // { arity: 2 } - Get l0 // { arity: 1 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum(#0{f1})] // { arity: 2 } - Get l0 // { arity: 1 } Source materialize.public.t1 filter=((#0{f1} >= 0)) @@ -406,38 +352,26 @@ Explained Query: ArrangeBy keys=[[#0{f1}]] // { arity: 2 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = - Union // { arity: 3 } - Map (null, null) // { arity: 3 } - Union // { arity: 1 } - Negate // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } - implementation - %1[#0]UKA » %0:l0[#0{f1}]K - Get l0 // { arity: 1 } - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Distinct project=[#0{f1}] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Get l1 // { 
arity: 2 } - Project (#0{f1}) // { arity: 1 } - ReadStorage materialize.public.t1 // { arity: 2 } - Project (#0{f1}, #0{f1}, #1{f2}) // { arity: 3 } - Get l1 // { arity: 2 } Return // { arity: 3 } - Project (#0{f1}, #3{sum_f2}, #1{max_f1}) // { arity: 3 } - Filter (#1{max_f1} >= 0) // { arity: 4 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKAif » %1[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#1{f1})] // { arity: 2 } - Project (#0{f1}, #1{f1}) // { arity: 2 } - Get l2 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum(#1{f2})] // { arity: 2 } - Project (#0{f1}, #2{f2}) // { arity: 2 } - Get l2 // { arity: 3 } + Filter (#2{max_f1} >= 0) // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[sum(#2{f2}), max(#1{f1})] // { arity: 3 } + Union // { arity: 3 } + Map (null, null) // { arity: 3 } + Union // { arity: 1 } + Negate // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } + implementation + %1[#0]UKA » %0:l0[#0{f1}]K + Get l0 // { arity: 1 } + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Distinct project=[#0{f1}] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Get l1 // { arity: 2 } + Project (#0{f1}) // { arity: 1 } + ReadStorage materialize.public.t1 // { arity: 2 } + Project (#0{f1}, #0{f1}, #1{f2}) // { arity: 3 } + Get l1 // { arity: 2 } Source materialize.public.t1 Source materialize.public.t2 @@ -452,31 +386,18 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH(humanized expressions, arity, join implementations) AS VERBOSE TEXT FOR select t1.f1, sum(t2.f1 + t2.f2), max(t2.f1) from t1 LEFT JOIN t2 ON t1.f1 = t2.f1 group by t1.f1 having max(t2.f1) >= 0; ---- Explained Query: - With - cte l0 = - Project (#0{f1}, #2{f2}) // { arity: 2 } - Join on=(#0{f1} = #1{f1}) type=differential // { arity: 3 } - implementation - %0:t1[#0{f1}]Kif » %1:t2[#0{f1}]Kiif - 
ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Filter (#0{f1} >= 0) // { arity: 2 } - ReadStorage materialize.public.t1 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Filter (#0{f1} >= 0) // { arity: 2 } - ReadStorage materialize.public.t2 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #3{sum}, #1{max_f1}) // { arity: 3 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } + Reduce group_by=[#0{f1}] aggregates=[sum((#0{f1} + #1{f2})), max(#0{f1})] // { arity: 3 } + Project (#0{f1}, #2{f2}) // { arity: 2 } + Join on=(#0{f1} = #1{f1}) type=differential // { arity: 3 } implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#0{f1})] // { arity: 2 } - Project (#0{f1}) // { arity: 1 } - Get l0 // { arity: 2 } + %0:t1[#0{f1}]Kif » %1:t2[#0{f1}]Kiif + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Filter (#0{f1} >= 0) // { arity: 2 } + ReadStorage materialize.public.t1 // { arity: 2 } ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum((#0{f1} + #1{f2}))] // { arity: 2 } - Get l0 // { arity: 2 } + Filter (#0{f1} >= 0) // { arity: 2 } + ReadStorage materialize.public.t2 // { arity: 2 } Source materialize.public.t1 filter=((#0{f1} >= 0)) @@ -492,31 +413,19 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH(humanized expressions, arity, join implementations) AS VERBOSE TEXT FOR select t1.f1, sum(t1.f1 + t2.f1), max(t2.f1) from t1 LEFT JOIN t2 ON t1.f1 = t2.f1 group by t1.f1 having max(t2.f1) >= 0; ---- Explained Query: - With - cte l0 = - Project (#0{f1}) // { arity: 1 } - Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } - implementation - %0:t1[#0{f1}]Kif » %1:t2[#0{f1}]Kiif - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Filter (#0{f1} >= 0) // { arity: 2 } - ReadStorage materialize.public.t1 // { arity: 2 } - ArrangeBy 
keys=[[#0{f1}]] // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Filter (#0{f1} >= 0) // { arity: 2 } - ReadStorage materialize.public.t2 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #3{sum}, #1{max_f1}) // { arity: 3 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } + Reduce group_by=[#0{f1}] aggregates=[sum((#0{f1} + #0{f1})), max(#0{f1})] // { arity: 3 } + Project (#0{f1}) // { arity: 1 } + Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#0{f1})] // { arity: 2 } - Get l0 // { arity: 1 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum((#0{f1} + #0{f1}))] // { arity: 2 } - Get l0 // { arity: 1 } + %0:t1[#0{f1}]Kif » %1:t2[#0{f1}]Kiif + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Filter (#0{f1} >= 0) // { arity: 2 } + ReadStorage materialize.public.t1 // { arity: 2 } + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Filter (#0{f1} >= 0) // { arity: 2 } + ReadStorage materialize.public.t2 // { arity: 2 } Source materialize.public.t1 filter=((#0{f1} >= 0)) @@ -548,36 +457,25 @@ Explained Query: Project (#0{f1}) // { arity: 1 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = - Union // { arity: 2 } - Map (null) // { arity: 2 } - Union // { arity: 1 } - Negate // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } - implementation - %1[#0]UKA » %0:l0[#0{f1}]K - Get l0 // { arity: 1 } - ArrangeBy keys=[[#0{f1}]] // { arity: 1 } - Distinct project=[#0{f1}] // { arity: 1 } - Get l1 // { arity: 1 } - Project (#0{f1}) // { arity: 1 } - ReadStorage materialize.public.t1 // { arity: 2 } - Project (#0{f1}, #0{f1}) // { arity: 2 } - Get l1 // { arity: 1 } Return // { arity: 3 } - Project 
(#0{f1}, #3{sum_f1}, #1{max_f1}) // { arity: 3 } - Filter (#1{max_f1} >= 0) // { arity: 4 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKAif » %1[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#1{f1})] // { arity: 2 } - Get l2 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum(#0{f1})] // { arity: 2 } + Filter (#2{max_f1} >= 0) // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[sum(#0{f1}), max(#1{f1})] // { arity: 3 } + Union // { arity: 2 } + Map (null) // { arity: 2 } + Union // { arity: 1 } + Negate // { arity: 1 } + Project (#0{f1}) // { arity: 1 } + Join on=(#0{f1} = #1{f1}) type=differential // { arity: 2 } + implementation + %1[#0]UKA » %0:l0[#0{f1}]K + Get l0 // { arity: 1 } + ArrangeBy keys=[[#0{f1}]] // { arity: 1 } + Distinct project=[#0{f1}] // { arity: 1 } + Get l1 // { arity: 1 } Project (#0{f1}) // { arity: 1 } - Get l2 // { arity: 2 } + ReadStorage materialize.public.t1 // { arity: 2 } + Project (#0{f1}, #0{f1}) // { arity: 2 } + Get l1 // { arity: 1 } Source materialize.public.t1 Source materialize.public.t2 @@ -654,7 +552,8 @@ Explained Query: ArrangeBy keys=[[#0{f1}]] // { arity: 2 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = + Return // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[count(#1{f2}), max(#1{f2})] // { arity: 3 } Union // { arity: 2 } Map (null) // { arity: 2 } Union // { arity: 1 } @@ -671,17 +570,6 @@ Explained Query: Project (#0{f1}) // { arity: 1 } ReadStorage materialize.public.t1 // { arity: 2 } Get l1 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #3{count_f2}, #1{max_f2}) // { arity: 3 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#1{f2})] // { arity: 2 } 
- Get l2 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[count(#1{f2})] // { arity: 2 } - Get l2 // { arity: 2 } Source materialize.public.t1 Source materialize.public.t2 @@ -696,8 +584,8 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH(humanized expressions, arity, join implementations) AS VERBOSE TEXT FOR select t1.f1, count(t2.f2), max(t2.f2) from t1 LEFT JOIN t2 ON t1.f1 = t2.f1 group by t1.f1 having max(t2.f2) > 0; ---- Explained Query: - With - cte l0 = + Filter (#2{max_f2} > 0) // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[count(#1{f2}), max(#1{f2})] // { arity: 3 } Project (#0{f1}, #2{f2}) // { arity: 2 } Join on=(#0{f1} = #1{f1}) type=differential // { arity: 3 } implementation @@ -709,18 +597,6 @@ Explained Query: ArrangeBy keys=[[#0{f1}]] // { arity: 2 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #3{count_f2}, #1{max_f2}) // { arity: 3 } - Filter (#1{max_f2} > 0) // { arity: 4 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKAif » %1[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#1{f2})] // { arity: 2 } - Get l0 // { arity: 2 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[count(#1{f2})] // { arity: 2 } - Get l0 // { arity: 2 } Source materialize.public.t1 filter=((#0{f1}) IS NOT NULL) @@ -736,8 +612,8 @@ query T multiline EXPLAIN OPTIMIZED PLAN WITH(humanized expressions, arity, join implementations) AS VERBOSE TEXT FOR select t1.f1, max(t1.f1 + t2.f2), sum(t1.f2 + t2.f2) from t1 LEFT JOIN t2 ON t1.f1 = t2.f1 group by t1.f1 having max(t1.f1 + t2.f2) > 0; ---- Explained Query: - With - cte l0 = + Filter (#1{max} > 0) // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[max((#0{f1} + #2{f2})), sum((#1{f2} + #2{f2}))] // { arity: 3 } Project (#0{f1}, #1{f2}, #3{f2}) // { 
arity: 3 } Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } implementation @@ -748,19 +624,6 @@ Explained Query: ArrangeBy keys=[[#0{f1}]] // { arity: 2 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - Return // { arity: 3 } - Project (#0{f1}, #1{max}, #3{sum}) // { arity: 3 } - Filter (#1{max} > 0) // { arity: 4 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKAif » %1[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max((#0{f1} + #1{f2}))] // { arity: 2 } - Project (#0{f1}, #2{f2}) // { arity: 2 } - Get l0 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[sum((#1{f2} + #2{f2}))] // { arity: 2 } - Get l0 // { arity: 3 } Source materialize.public.t1 filter=((#0{f1}) IS NOT NULL) @@ -818,7 +681,8 @@ Explained Query: ArrangeBy keys=[[#0{f1}]] // { arity: 2 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = + Return // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[count(#1{f2}), max(#2{f2})] // { arity: 3 } Union // { arity: 3 } Map (null) // { arity: 3 } Union // { arity: 2 } @@ -834,19 +698,6 @@ Explained Query: Get l1 // { arity: 3 } ReadStorage materialize.public.t1 // { arity: 2 } Get l1 // { arity: 3 } - Return // { arity: 3 } - Project (#0{f1}, #3{count_f2}, #1{max_f2}) // { arity: 3 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKA » %1[#0]UKA - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#1{f2})] // { arity: 2 } - Project (#0{f1}, #2{f2}) // { arity: 2 } - Get l2 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[count(#1{f2})] // { arity: 2 } - Project (#0{f1}, #1{f2}) // { arity: 2 } - Get l2 // { arity: 3 } Source materialize.public.t1 Source materialize.public.t2 @@ -874,36 
+725,24 @@ Explained Query: ArrangeBy keys=[[#0{f1}]] // { arity: 2 } Filter (#0{f1}) IS NOT NULL // { arity: 2 } ReadStorage materialize.public.t2 // { arity: 2 } - cte l2 = - Union // { arity: 3 } - Map (null) // { arity: 3 } - Union // { arity: 2 } - Negate // { arity: 2 } - Project (#0{f1}, #1{f2}) // { arity: 2 } - Join on=(#1{f2} = #2{f2}) type=differential // { arity: 3 } - implementation - %1[#0]UKA » %0:l0[#1{f2}]K - Get l0 // { arity: 2 } - ArrangeBy keys=[[#0{f2}]] // { arity: 1 } - Distinct project=[#0{f2}] // { arity: 1 } - Project (#1{f2}) // { arity: 1 } - Get l1 // { arity: 3 } - ReadStorage materialize.public.t1 // { arity: 2 } - Get l1 // { arity: 3 } Return // { arity: 3 } - Project (#0{f1}, #3{count_f2}, #1{max_f2}) // { arity: 3 } - Filter (#1{max_f2} > 0) // { arity: 4 } - Join on=(#0{f1} = #2{f1}) type=differential // { arity: 4 } - implementation - %0[#0]UKAif » %1[#0]UKAif - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[max(#1{f2})] // { arity: 2 } - Project (#0{f1}, #2{f2}) // { arity: 2 } - Get l2 // { arity: 3 } - ArrangeBy keys=[[#0{f1}]] // { arity: 2 } - Reduce group_by=[#0{f1}] aggregates=[count(#1{f2})] // { arity: 2 } - Project (#0{f1}, #1{f2}) // { arity: 2 } - Get l2 // { arity: 3 } + Filter (#2{max_f2} > 0) // { arity: 3 } + Reduce group_by=[#0{f1}] aggregates=[count(#1{f2}), max(#2{f2})] // { arity: 3 } + Union // { arity: 3 } + Map (null) // { arity: 3 } + Union // { arity: 2 } + Negate // { arity: 2 } + Project (#0{f1}, #1{f2}) // { arity: 2 } + Join on=(#1{f2} = #2{f2}) type=differential // { arity: 3 } + implementation + %1[#0]UKA » %0:l0[#1{f2}]K + Get l0 // { arity: 2 } + ArrangeBy keys=[[#0{f2}]] // { arity: 1 } + Distinct project=[#0{f2}] // { arity: 1 } + Project (#1{f2}) // { arity: 1 } + Get l1 // { arity: 3 } + ReadStorage materialize.public.t1 // { arity: 2 } + Get l1 // { arity: 3 } Source materialize.public.t1 Source materialize.public.t2 @@ -1032,44 +871,36 @@ Explained Query: 
Project (#0{f1}) // { arity: 1 } Filter (#1{f2} = 6) // { arity: 2 } ReadStorage materialize.public.t3 // { arity: 2 } - cte l1 = - Union // { arity: 1 } - Project (#0{f2}) // { arity: 1 } - Get l0 // { arity: 2 } - Project (#4) // { arity: 1 } - Map (null) // { arity: 5 } - Join on=(#0{f1} = #2{f1} AND #1{f2} = #3{f2}) type=differential // { arity: 4 } - implementation - %0[#0, #1]KK » %1:t3[#0, #1]KK - ArrangeBy keys=[[#0{f1}, #1{f2}]] // { arity: 2 } - Union // { arity: 2 } - Negate // { arity: 2 } - Map (6) // { arity: 2 } - Distinct project=[#0{f1}] // { arity: 1 } - Project (#1{f1}) // { arity: 1 } - Get l0 // { arity: 2 } - Distinct project=[#0{f1}, #1{f2}] // { arity: 2 } - ReadStorage materialize.public.t3 // { arity: 2 } - ArrangeBy keys=[[#0{f1}, #1{f2}]] // { arity: 2 } - ReadStorage materialize.public.t3 // { arity: 2 } Return // { arity: 1 } - Project (#2{count}) // { arity: 1 } - Join on=(#0{f2} = #1{max_f2}) type=delta // { arity: 3 } + Project (#1{count}) // { arity: 1 } + Join on=(#0{f2} = #2{max_f2}) type=differential // { arity: 3 } implementation - %0:t1 » %1[#0{agg2}]UK » %2[×]UA - %1 » %2[×]UA » %0:t1[#0{f2}]K - %2 » %1[×]UA » %0:t1[#0{f2}]K + %1[#1{agg2}]UK » %0:t1[#0{f2}]K ArrangeBy keys=[[#0{f2}]] // { arity: 1 } Project (#1{f2}) // { arity: 1 } ReadStorage materialize.public.t1 // { arity: 2 } - ArrangeBy keys=[[], [#0{max_f2}]] // { arity: 1 } - Filter (#0{max_f2}) IS NOT NULL // { arity: 1 } - Reduce aggregates=[max(#0{f2})] // { arity: 1 } - Get l1 // { arity: 1 } - ArrangeBy keys=[[]] // { arity: 1 } - Reduce aggregates=[count(*)] // { arity: 1 } - Project () // { arity: 0 } - Get l1 // { arity: 1 } + ArrangeBy keys=[[#1{max_f2}]] // { arity: 2 } + Filter (#1{max_f2}) IS NOT NULL // { arity: 2 } + Reduce aggregates=[count(*), max(#0{f2})] // { arity: 2 } + Union // { arity: 1 } + Project (#0{f2}) // { arity: 1 } + Get l0 // { arity: 2 } + Project (#4) // { arity: 1 } + Map (null) // { arity: 5 } + Join on=(#0{f1} = #2{f1} AND #1{f2} = 
#3{f2}) type=differential // { arity: 4 } + implementation + %0[#0, #1]KK » %1:t3[#0, #1]KK + ArrangeBy keys=[[#0{f1}, #1{f2}]] // { arity: 2 } + Union // { arity: 2 } + Negate // { arity: 2 } + Map (6) // { arity: 2 } + Distinct project=[#0{f1}] // { arity: 1 } + Project (#1{f1}) // { arity: 1 } + Get l0 // { arity: 2 } + Distinct project=[#0{f1}, #1{f2}] // { arity: 2 } + ReadStorage materialize.public.t3 // { arity: 2 } + ArrangeBy keys=[[#0{f1}, #1{f2}]] // { arity: 2 } + ReadStorage materialize.public.t3 // { arity: 2 } Source materialize.public.t1 Source materialize.public.t2 diff --git a/test/sqllogictest/transform/fold_vs_dataflow/3_number_aggfns_dataflow.slt b/test/sqllogictest/transform/fold_vs_dataflow/3_number_aggfns_dataflow.slt index 0105f3ba4c19a..fc2e54efab47e 100644 --- a/test/sqllogictest/transform/fold_vs_dataflow/3_number_aggfns_dataflow.slt +++ b/test/sqllogictest/transform/fold_vs_dataflow/3_number_aggfns_dataflow.slt @@ -55,27 +55,19 @@ FROM t_using_dataflow_rendering; Explained Query: With cte l0 = - Project (#0{real1}..=#2{numeric1}) - ReadStorage materialize.public.t_using_dataflow_rendering - cte l1 = - CrossJoin type=differential - ArrangeBy keys=[[]] - Reduce aggregates=[min(#0{real1}), min(#1{double1}), min(#2{numeric1}), min((#0{real1} + #0{real1})), min((#1{double1} + #1{double1})), min((#2{numeric1} + #2{numeric1})), max(#0{real1}), max(#1{double1}), max(#2{numeric1}), max((#0{real1} + #0{real1})), max((#1{double1} + #1{double1})), max((#2{numeric1} + #2{numeric1}))] - Get l0 - ArrangeBy keys=[[]] - Reduce aggregates=[sum(#0{real1}), sum(#1{double1}), sum(#2{numeric1}), sum((#0{real1} + #0{real1})), sum((#1{double1} + #1{double1})), sum((#2{numeric1} + #2{numeric1})), count(#0{real1}), count(#1{double1}), count(#2{numeric1}), count((#0{real1} + #0{real1})), count((#1{double1} + #1{double1})), count((#2{numeric1} + #2{numeric1}))] - Get l0 + Reduce aggregates=[sum(#0{real1}), sum(#1{double1}), sum(#2{numeric1}), sum((#0{real1} + 
#0{real1})), sum((#1{double1} + #1{double1})), sum((#2{numeric1} + #2{numeric1})), min(#0{real1}), min(#1{double1}), min(#2{numeric1}), min((#0{real1} + #0{real1})), min((#1{double1} + #1{double1})), min((#2{numeric1} + #2{numeric1})), max(#0{real1}), max(#1{double1}), max(#2{numeric1}), max((#0{real1} + #0{real1})), max((#1{double1} + #1{double1})), max((#2{numeric1} + #2{numeric1})), count(#0{real1}), count(#1{double1}), count(#2{numeric1}), count((#0{real1} + #0{real1})), count((#1{double1} + #1{double1})), count((#2{numeric1} + #2{numeric1}))] + Project (#0{real1}..=#2{numeric1}) + ReadStorage materialize.public.t_using_dataflow_rendering Return Project (#0{sum_real1}..=#17{max}, #24..=#29) Map ((#0{sum_real1} / bigint_to_real(case when (#18{count_real1} = 0) then null else #18{count_real1} end)), (#1{sum_double1} / bigint_to_double(case when (#19{count_double1} = 0) then null else #19{count_double1} end)), (#2{sum_numeric1} / bigint_to_numeric(case when (#20{count_numeric1} = 0) then null else #20{count_numeric1} end)), (#3{sum} / bigint_to_real(case when (#21{count} = 0) then null else #21{count} end)), (#4{sum} / bigint_to_double(case when (#22{count} = 0) then null else #22{count} end)), (#5{sum} / bigint_to_numeric(case when (#23{count} = 0) then null else #23{count} end))) Union - Project (#12{sum_real1}..=#17{sum}, #0{min_real1}..=#11{max}, #18{count_real1}..=#23{count}) - Get l1 + Get l0 Map (null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 0, 0, 0, 0, 0, 0) Union Negate Project () - Get l1 + Get l0 Constant - () diff --git a/test/terraform/mzcompose.py b/test/terraform/mzcompose.py index 535360cb81b48..141d1541bcce0 100644 --- a/test/terraform/mzcompose.py +++ b/test/terraform/mzcompose.py @@ -62,7 +62,6 @@ "drop.td", "duplicate-table-names.td", "failpoints.td", - "fetch-tail-as-of.td", "fetch-tail-large-diff.td", "fetch-tail-limit-timeout.td", "fetch-tail-timestamp-zero.td", diff --git 
a/test/test-certs/create-certs.sh b/test/test-certs/create-certs.sh index a950b1e41ae8e..e2ecdf10f4bd3 100755 --- a/test/test-certs/create-certs.sh +++ b/test/test-certs/create-certs.sh @@ -135,7 +135,7 @@ create_cert() { -passout pass:$SSL_SECRET } -for i in materialized producer postgres certuser balancerd frontegg-mock +for i in materialized producer postgres certuser balancerd frontegg-mock sql-server do create_cert $i "ca" $i diff --git a/test/testdrive/pg-catalog.td b/test/testdrive/pg-catalog.td index 800a07be280f5..2108eb56e92b3 100644 --- a/test/testdrive/pg-catalog.td +++ b/test/testdrive/pg-catalog.td @@ -21,6 +21,7 @@ name nullable type comment oid false oid "" relname false text "" relnamespace false oid "" +relnatts false smallint "" reloftype false oid "" relowner false oid "" relam false oid "" @@ -66,6 +67,7 @@ indisclustered false boolean "" indisvalid false boolean "" indisreplident false boolean "" indkey false int2vector "" +indnatts false smallint "" indoption false int2vector "" indexprs true text "" indpred true text "" diff --git a/test/testdrive/session.td b/test/testdrive/session.td index bdf5c949cad97..3a1ff5f5598ef 100644 --- a/test/testdrive/session.td +++ b/test/testdrive/session.td @@ -32,7 +32,7 @@ emit_plan_insights_notice off "Boolean flag i emit_timestamp_notice off "Boolean flag indicating whether to send a NOTICE with timestamp explanations of queries (Materialize)." emit_trace_id_notice off "Boolean flag indicating whether to send a NOTICE specifying the trace id when available (Materialize)." enable_rbac_checks on "User facing global boolean flag indicating whether to apply RBAC checks before executing statements (Materialize)." -enable_reduce_reduction on "split complex reductions in to simpler ones and a join (Materialize)." +enable_reduce_reduction off "split complex reductions in to simpler ones and a join (Materialize)." 
enable_session_rbac_checks off "User facing session boolean flag indicating whether to apply RBAC checks before executing statements (Materialize)." extra_float_digits 3 "Adjusts the number of digits displayed for floating-point values (PostgreSQL)." failpoints "Allows failpoints to be dynamically activated."